Inversion: Linear: IRLS

Here we go over the basics of creating a linear problem and inversion.

../../../_images/sphx_glr_plot_inversion_linear_irls_001.png

Out:

SimPEG.InvProblem is setting bfgsH0 to the inverse of the eval2Deriv.
                    ***Done using same Solver and solverOpts as the problem***
model has any nan: 0
=============================== Projected GNCG ===============================
  #     beta     phi_d     phi_m       f      |proj(x-g)-x|  LS    Comment
-----------------------------------------------------------------------------
x0 has any nan: 0
   0  2.78e+04  5.36e+05  1.34e-08  5.36e+05    2.00e+01      0
   1  1.39e+04  7.07e+02  1.79e-01  3.19e+03    1.78e+01      0
   2  6.94e+03  2.66e+02  1.99e-01  1.65e+03    1.78e+01      0   Skip BFGS
   3  3.47e+03  9.77e+01  2.15e-01  8.44e+02    1.75e+01      0   Skip BFGS
   4  1.74e+03  3.69e+01  2.27e-01  4.31e+02    1.65e+01      0   Skip BFGS
   5  8.68e+02  1.49e+01  2.36e-01  2.19e+02    1.42e+01      0   Skip BFGS
Convergence with smooth l2-norm regularization: Start IRLS steps...
L[p qx qy qz]-norm : [0.0, 0.0, 2.0, 2.0]
eps_p: [0.017606695476258656, 0.1, 0.1] eps_q: [0.0056400498739047761, 0.1, 0.1]
Regularization decrease: 0.000e+00
   6  1.09e+03  7.98e+00  2.36e-01  2.64e+02    1.95e+01      0   Skip BFGS
   7  1.09e+03  9.17e+00  1.28e-01  1.49e+02    1.96e+01      0
   8  1.09e+03  1.03e+01  1.10e-01  1.30e+02    1.90e+01      0
Regularization decrease: 5.610e-01
   9  1.01e+03  1.08e+01  1.10e-01  1.22e+02    1.93e+01      0
  10  1.01e+03  1.40e+01  7.91e-02  9.38e+01    1.86e+01      0
  11  1.01e+03  1.33e+01  7.91e-02  9.32e+01    1.74e+01      0
Regularization decrease: 2.546e-01
  12  7.65e+02  1.32e+01  7.91e-02  7.37e+01    1.90e+01      0
  13  7.65e+02  1.30e+01  6.66e-02  6.39e+01    1.73e+01      0
  14  7.65e+02  1.20e+01  6.68e-02  6.31e+01    1.86e+01      0
Regularization decrease: 1.626e-01
  15  6.31e+02  1.21e+01  6.68e-02  5.43e+01    1.89e+01      0
  16  6.31e+02  1.13e+01  5.88e-02  4.84e+01    1.84e+01      0
  17  6.31e+02  1.11e+01  5.84e-02  4.80e+01    1.92e+01      0
Regularization decrease: 1.156e-01
  18  5.84e+02  1.08e+01  5.84e-02  4.50e+01    1.69e+01      0
  19  5.84e+02  1.02e+01  5.33e-02  4.14e+01    1.51e+01      0
  20  5.84e+02  9.98e+00  5.33e-02  4.11e+01    1.81e+01      0
Regularization decrease: 8.820e-02
  21  5.84e+02  9.86e+00  5.33e-02  4.10e+01    1.50e+01      0
  22  5.84e+02  9.93e+00  5.11e-02  3.98e+01    1.66e+01      0
  23  5.84e+02  9.63e+00  5.13e-02  3.96e+01    1.72e+01      0
Regularization decrease: 4.382e-02
  24  5.84e+02  9.75e+00  5.13e-02  3.97e+01    1.72e+01      0
  25  5.84e+02  9.87e+00  5.00e-02  3.91e+01    1.71e+01      0
  26  5.84e+02  9.77e+00  4.98e-02  3.89e+01    1.63e+01      0
Regularization decrease: 2.230e-02
  27  5.84e+02  9.64e+00  4.98e-02  3.87e+01    1.68e+01      0
  28  5.84e+02  9.79e+00  4.86e-02  3.82e+01    1.56e+01      0
  29  5.84e+02  9.67e+00  4.84e-02  3.80e+01    1.52e+01      0
Regularization decrease: 3.179e-02
  30  5.84e+02  9.61e+00  4.84e-02  3.79e+01    1.62e+01      0
  31  5.84e+02  9.82e+00  4.69e-02  3.73e+01    1.81e+01      0
  32  5.84e+02  9.64e+00  4.66e-02  3.69e+01    1.68e+01      0
Regularization decrease: 3.782e-02
  33  5.84e+02  9.55e+00  4.66e-02  3.68e+01    1.73e+01      0
  34  5.84e+02  9.80e+00  4.42e-02  3.56e+01    1.97e+01      0
  35  5.84e+02  9.57e+00  4.39e-02  3.52e+01    2.10e+01      0
Regularization decrease: 6.464e-02
  36  5.84e+02  9.69e+00  4.39e-02  3.53e+01    1.59e+01      0
  37  5.84e+02  9.97e+00  4.17e-02  3.43e+01    1.85e+01      0
  38  5.84e+02  9.87e+00  4.18e-02  3.43e+01    1.66e+01      0
Regularization decrease: 3.908e-02
  39  5.84e+02  9.93e+00  4.18e-02  3.44e+01    8.42e+00      0
  40  5.84e+02  1.01e+01  4.08e-02  3.40e+01    1.04e+01      0
  41  5.84e+02  1.01e+01  4.09e-02  3.39e+01    2.15e+00      0
Regularization decrease: 2.025e-02
  42  5.84e+02  1.01e+01  4.09e-02  3.39e+01    3.78e+00      0
  43  5.84e+02  1.02e+01  3.99e-02  3.35e+01    6.70e+00      0
  44  5.84e+02  1.01e+01  3.99e-02  3.34e+01    1.99e+00      0
Regularization decrease: 2.382e-02
  45  5.84e+02  1.01e+01  3.99e-02  3.35e+01    3.05e+00      0
  46  5.84e+02  1.02e+01  3.97e-02  3.34e+01    2.16e+00      0
  47  5.84e+02  1.02e+01  3.97e-02  3.34e+01    4.89e-01      0
Regularization decrease: 4.121e-03
Reach maximum number of IRLS cycles: 15
------------------------- STOP! -------------------------
1 : |fc-fOld| = 0.0000e+00 <= tolF*(1+|f0|) = 5.3623e+04
1 : |xc-x_last| = 2.3193e-04 <= tolX*(1+|x0|) = 1.0010e-01
0 : |proj(x-g)-x|    = 4.8907e-01 <= tolG          = 1.0000e-01
0 : |proj(x-g)-x|    = 4.8907e-01 <= 1e3*eps       = 1.0000e-02
0 : maxIter   =     100    <= iter          =     48
------------------------- DONE! -------------------------
Final misfit:10.1606298213

from __future__ import print_function

import numpy as np
import matplotlib.pyplot as plt

from SimPEG import Mesh
from SimPEG import Problem
from SimPEG import Survey
from SimPEG import DataMisfit
from SimPEG import Directives
from SimPEG import Optimization
from SimPEG import Regularization
from SimPEG import InvProblem
from SimPEG import Inversion


def run(N=100, plotIt=True):
    """Run a 1D linear inversion with sparse (IRLS) regularization.

    Builds a small linear forward problem whose rows are decaying
    sinusoids, simulates noisy data from a blocky true model, and then
    inverts with an l2 warm-start followed by lp-norm IRLS iterations.

    Parameters
    ----------
    N : int
        Number of cells in the 1D tensor mesh.
    plotIt : bool
        If True, plot the rows of G and the true/recovered models.

    Returns
    -------
    tuple
        (prob, survey, mesh, mrec) — the problem, survey, mesh and the
        recovered model vector.
    """
    # Fixed seed so the synthetic noise (and hence the printed misfit)
    # is reproducible.
    np.random.seed(1)

    std_noise = 1e-2

    mesh = Mesh.TensorMesh([N])

    # Starting model: small non-zero constant; reference model: zeros.
    m0 = np.ones(mesh.nC) * 1e-4
    mref = np.zeros(mesh.nC)

    # Forward kernels: nk decaying cosines, g_k(x) = exp(p j_k x) cos(pi q j_k x)
    nk = 20
    jk = np.linspace(1., 60., nk)
    p = -0.25
    q = 0.25

    def g(k):
        return (
            np.exp(p*jk[k]*mesh.vectorCCx) *
            np.cos(np.pi*q*jk[k]*mesh.vectorCCx)
        )

    G = np.empty((nk, mesh.nC))

    for i in range(nk):
        G[i, :] = g(i)

    # Blocky true model: a positive step, a negative step, then zero.
    mtrue = np.zeros(mesh.nC)
    mtrue[mesh.vectorCCx > 0.3] = 1.
    mtrue[mesh.vectorCCx > 0.45] = -0.5
    mtrue[mesh.vectorCCx > 0.6] = 0

    prob = Problem.LinearProblem(mesh, G=G)
    survey = Survey.LinearSurvey()
    survey.pair(prob)
    # Observed data: clean forward response plus Gaussian noise.
    survey.dobs = prob.fields(mtrue) + std_noise * np.random.randn(nk)

    wd = np.ones(nk) * std_noise

    # Sensitivity weighting: root-sum-square of the columns of G,
    # normalized to a max of 1. (NOTE(review): the original comment called
    # this "distance weighting", but no distances are involved here.)
    wr = np.sum(prob.G**2., axis=0)**0.5
    wr = wr/np.max(wr)

    dmis = DataMisfit.l2_DataMisfit(survey)
    # Data weights are reciprocal standard deviations.
    dmis.Wd = 1./wd

    betaest = Directives.BetaEstimate_ByEig(beta0_ratio=1e-2)

    reg = Regularization.Sparse(mesh)
    # mref was assigned twice in the original (once to `mref`, then
    # overwritten with an identical zeros vector); a single assignment
    # suffices.
    reg.mref = mref
    reg.cell_weights = wr

    opt = Optimization.ProjectedGNCG(
        maxIter=100, lower=-2., upper=2.,
        maxIterLS=20, maxIterCG=10, tolCG=1e-3
    )
    invProb = InvProblem.BaseInvProblem(dmis, reg, opt)
    update_Jacobi = Directives.Update_lin_PreCond()

    # Set the IRLS directive, penalize the lowest 25 percentile of model values
    # Start with an l2-l2, then switch to lp-norms
    # norms = [p, qx, qy, qz]: sparse (l0) on model values and x-derivative
    # is not used here in 1D beyond the first two entries.
    norms = [0., 0., 2., 2.]
    IRLS = Directives.Update_IRLS(
        norms=norms, prctile=25, maxIRLSiter=15, minGNiter=3
    )

    inv = Inversion.BaseInversion(
        invProb,
        directiveList=[IRLS, betaest, update_Jacobi]
    )

    # Run inversion
    mrec = inv.run(m0)

    print("Final misfit:" + str(invProb.dmisfit.eval(mrec)))

    if plotIt:
        fig, axes = plt.subplots(1, 2, figsize=(12*1.2, 4*1.2))
        # Left panel: the rows of G (the forward kernels).
        for i in range(prob.G.shape[0]):
            axes[0].plot(prob.G[i, :])
        axes[0].set_title('Columns of matrix G')

        # Right panel: true model, smooth l2 warm-start, and sparse result.
        axes[1].plot(mesh.vectorCCx, mtrue, 'b-')
        axes[1].plot(mesh.vectorCCx, reg.l2model, 'r-')
        axes[1].set_ylim(-1.0, 1.25)

        axes[1].plot(mesh.vectorCCx, mrec, 'k-', lw=2)
        axes[1].legend(
            (
                'True Model',
                'Smooth l2-l2',
                'Sparse lp: {0}, lqx: {1}'.format(*reg.norms)
            ),
            fontsize=12
        )

    return prob, survey, mesh, mrec

if __name__ == '__main__':
    # Run the example and display the generated figures.
    run()
    plt.show()

Total running time of the script: ( 0 minutes 4.221 seconds)

Generated by Sphinx-Gallery