-
Notifications
You must be signed in to change notification settings - Fork 69
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Add equivalent layer for harmonic functions (#78)
Add an Equivalent Layer gridder for harmonic functions, based on verde.BaseGridder using 1/r as the basis function. By default it puts one point source below each observation point at a constant relative depth and fits the corresponding coefficient to each source. Also a custom set of point sources can be passed. Add a gallery example showing how to grid a small region of the magnetic anomaly data from Rio de Janeiro.
- Loading branch information
1 parent
4e282a0
commit a618988
Showing
8 changed files
with
472 additions
and
1 deletion.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,2 @@ | ||
Equivalent Layers | ||
----------------- |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,97 @@ | ||
""" | ||
Gridding and upward continuation | ||
================================ | ||
Most potential field surveys gather data along scattered and uneven flight lines or | ||
ground measurements. For a great number of applications we may need to interpolate these | ||
data points onto a regular grid at a constant altitude. Upward-continuation is also a | ||
routine task for smoothing, noise attenuation, source separation, etc. | ||
Both tasks can be done simultaneously through an *equivalent layer* [Dampney1969]_. We | ||
will use :class:`harmonica.EQLHarmonic` to estimate the coefficients of a set of point | ||
sources (the equivalent layer) that fit the observed data. The fitted layer can then be | ||
used to predict data values wherever we want, like on a grid at a certain altitude. The | ||
sources for :class:`~harmonica.EQLHarmonic` in particular are placed one beneath each | ||
data point at a constant relative depth from the elevation of the data point following | ||
[Cooper2000]_. | ||
The advantage of using an equivalent layer is that it takes into account the 3D nature | ||
of the observations, not just their horizontal positions. It also allows data | ||
uncertainty to be taken into account and noise to be suppressed though the least-squares | ||
fitting process. The main disadvantage is the increased computational load (both in | ||
terms of time and memory). | ||
""" | ||
import matplotlib.pyplot as plt | ||
import numpy as np | ||
import pyproj | ||
import verde as vd | ||
import harmonica as hm | ||
|
||
|
||
# Fetch the sample total-field magnetic anomaly data from Rio de Janeiro | ||
data = hm.datasets.fetch_rio_magnetic() | ||
|
||
# Slice a smaller portion of the survey data to speed-up calculations for this example | ||
region = [-42.35, -42.10, -22.35, -22.15] | ||
inside = vd.inside((data.longitude, data.latitude), region) | ||
data = data[inside] | ||
print("Number of data points:", data.shape[0]) | ||
|
||
# Since this is a small area, we'll project our data and use Cartesian coordinates | ||
projection = pyproj.Proj(proj="merc", lat_ts=data.latitude.mean()) | ||
easting, northing = projection(data.longitude.values, data.latitude.values) | ||
coordinates = (easting, northing, data.altitude_m) | ||
|
||
# Create the equivalent layer. We'll use the default point source configuration at a | ||
# constant relative depth beneath each observation point. The damping parameter helps | ||
# smooth the predicted data and ensure stability. | ||
eql = hm.EQLHarmonic(relative_depth=1000, damping=10) | ||
|
||
# Fit the layer coefficients to the observed magnetic anomaly. | ||
eql.fit(coordinates, data.total_field_anomaly_nt) | ||
|
||
# Evaluate the data fit by calculating an R² score against the observed data. | ||
# This is a measure of how well layer the fits the data NOT how good the interpolation | ||
# will be. | ||
print("R² score:", eql.score(coordinates, data.total_field_anomaly_nt)) | ||
|
||
# Interpolate data on a regular grid with 400 m spacing. The interpolation requires an | ||
# extra coordinate (upward height). By passing in 500 m, we're effectively | ||
# upward-continuing the data (mean flight height is 200 m). | ||
grid = eql.grid(spacing=400, data_names=["magnetic_anomaly"], extra_coords=500) | ||
|
||
# The grid is a xarray.Dataset with values, coordinates, and metadata | ||
print("\nGenerated grid:\n", grid) | ||
|
||
# Plot original magnetic anomaly and the gridded and upward-continued version | ||
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(12, 6), sharey=True) | ||
|
||
# Get the maximum absolute value between the original and gridded data so we can use the | ||
# same color scale for both plots and have 0 centered at the white color. | ||
maxabs = vd.maxabs(data.total_field_anomaly_nt, grid.magnetic_anomaly.values) | ||
|
||
ax1.set_title("Observed magnetic anomaly data") | ||
tmp = ax1.scatter( | ||
easting, | ||
northing, | ||
c=data.total_field_anomaly_nt, | ||
s=20, | ||
vmin=-maxabs, | ||
vmax=maxabs, | ||
cmap="seismic", | ||
) | ||
plt.colorbar(tmp, ax=ax1, label="nT", pad=0.05, aspect=40, orientation="horizontal") | ||
|
||
ax2.set_title("Gridded and upward-continued") | ||
tmp = grid.magnetic_anomaly.plot.pcolormesh( | ||
ax=ax2, | ||
add_colorbar=False, | ||
add_labels=False, | ||
vmin=-maxabs, | ||
vmax=maxabs, | ||
cmap="seismic", | ||
) | ||
plt.colorbar(tmp, ax=ax2, label="nT", pad=0.05, aspect=40, orientation="horizontal") | ||
|
||
plt.tight_layout(pad=0) | ||
plt.show() |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Empty file.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,236 @@ | ||
""" | ||
Equivalent layer for generic harmonic functions | ||
""" | ||
import numpy as np | ||
from numba import jit | ||
from sklearn.utils.validation import check_is_fitted | ||
import verde as vd | ||
import verde.base as vdb | ||
|
||
from ..forward.utils import distance_cartesian | ||
|
||
|
||
class EQLHarmonic(vdb.BaseGridder):
    r"""
    Equivalent-layer for generic harmonic functions (gravity, magnetics, etc).

    This equivalent layer can be used for:

    * Cartesian coordinates (geographic coordinates must be projected before use)
    * Gravity and magnetic data (including derivatives)
    * Single data types
    * Interpolation
    * Upward continuation
    * Finite-difference based derivative calculations

    It cannot be used for:

    * Joint inversion of multiple data types (e.g., gravity + gravity gradients)
    * Reduction to the pole of magnetic total field anomaly data
    * Analytical derivative calculations

    Point sources are located beneath the observed potential-field measurement points by
    default [Cooper2000]_. Custom source locations can be used by specifying the
    *points* argument. Coefficients associated with each point source are estimated
    through linear least-squares with damping (Tikhonov 0th order) regularization.

    The Green's function for point mass effects used is the inverse Cartesian distance
    between the grid coordinates and the point source:

    .. math::

        \phi(\bar{x}, \bar{x}') = \frac{1}{||\bar{x} - \bar{x}'||}

    where :math:`\bar{x}` and :math:`\bar{x}'` are the coordinate vectors of the
    observation point and the source, respectively.

    Parameters
    ----------
    damping : None or float
        The positive damping regularization parameter. Controls how much smoothness is
        imposed on the estimated coefficients. If None, no regularization is used.
    points : None or list of arrays (optional)
        List containing the coordinates of the point sources used as the equivalent
        layer. Coordinates are assumed to be in the following order: (``easting``,
        ``northing``, ``upward``). If None, will place one point source below each
        observation point at a fixed relative depth below the observation point
        [Cooper2000]_. Defaults to None.
    relative_depth : float
        Relative depth at which the point sources are placed beneath the observation
        points. Each source point will be set beneath each data point at a depth
        calculated as the elevation of the data point minus this constant
        *relative_depth*. Use positive numbers (negative numbers would mean point
        sources are above the data points). Ignored if *points* is specified.

    Attributes
    ----------
    points_ : 2d-array
        Coordinates of the point sources used to build the equivalent layer.
    coefs_ : array
        Estimated coefficients of every point source.
    region_ : tuple
        The boundaries (``[W, E, S, N]``) of the data used to fit the interpolator.
        Used as the default region for the :meth:`~harmonica.EQLHarmonic.grid` and
        :meth:`~harmonica.EQLHarmonic.scatter` methods.
    """

    def __init__(self, damping=None, points=None, relative_depth=500):
        self.damping = damping
        self.points = points
        self.relative_depth = relative_depth

    def fit(self, coordinates, data, weights=None):
        """
        Fit the coefficients of the equivalent layer.

        The data region is captured and used as default for the
        :meth:`~harmonica.EQLHarmonic.grid` and :meth:`~harmonica.EQLHarmonic.scatter`
        methods.

        All input arrays must have the same shape.

        Parameters
        ----------
        coordinates : tuple of arrays
            Arrays with the coordinates of each data point. Should be in the
            following order: (easting, northing, upward, ...). Only easting,
            northing, and upward will be used, all subsequent coordinates will be
            ignored.
        data : array
            The data values of each data point.
        weights : None or array
            If not None, then the weights assigned to each data point.
            Typically, this should be 1 over the data uncertainty squared.

        Returns
        -------
        self
            Returns this estimator instance for chaining operations.
        """
        coordinates, data, weights = vdb.check_fit_input(coordinates, data, weights)
        # Capture the data region to use as a default when gridding.
        self.region_ = vd.get_region(coordinates[:2])
        coordinates = vdb.n_1d_arrays(coordinates, 3)
        if self.points is None:
            # Default source layout: one point source directly beneath each
            # observation point at a constant relative depth [Cooper2000]_.
            self.points_ = (
                coordinates[0],
                coordinates[1],
                coordinates[2] - self.relative_depth,
            )
        else:
            self.points_ = vdb.n_1d_arrays(self.points, 3)
        jacobian = self.jacobian(coordinates, self.points_)
        self.coefs_ = vdb.least_squares(jacobian, data, weights, self.damping)
        return self

    def predict(self, coordinates):
        """
        Evaluate the estimated equivalent layer on the given set of points.

        Requires a fitted estimator (see :meth:`~harmonica.EQLHarmonic.fit`).

        Parameters
        ----------
        coordinates : tuple of arrays
            Arrays with the coordinates of each data point. Should be in the
            following order: (``easting``, ``northing``, ``upward``, ...). Only
            ``easting``, ``northing`` and ``upward`` will be used, all subsequent
            coordinates will be ignored.

        Returns
        -------
        data : array
            The data values evaluated on the given points.
        """
        # We know the gridder has been fitted if it has the coefs_
        check_is_fitted(self, ["coefs_"])
        # Compute the broadcast geometry once: size allocates the flat result
        # array and shape restores the output to the input's shape.
        cast = np.broadcast(*coordinates[:3])
        coordinates = tuple(np.atleast_1d(i).ravel() for i in coordinates[:3])
        # Take the dtype only after converting to arrays so that plain Python
        # lists/tuples (which have no ``dtype`` attribute) are also accepted.
        data = np.zeros(cast.size, dtype=coordinates[0].dtype)
        predict_numba(coordinates, self.points_, self.coefs_, data)
        return data.reshape(cast.shape)

    def jacobian(
        self, coordinates, points, dtype="float64"
    ):  # pylint: disable=no-self-use
        """
        Make the Jacobian matrix for the equivalent layer.

        Each column of the Jacobian is the Green's function for a single point source
        evaluated on all observation points.

        Parameters
        ----------
        coordinates : tuple of arrays
            Arrays with the coordinates of each data point. Should be in the
            following order: (``easting``, ``northing``, ``upward``, ...). Only
            ``easting``, ``northing`` and ``upward`` will be used, all subsequent
            coordinates will be ignored.
        points : tuple of arrays
            Tuple of arrays containing the coordinates of the point sources used as
            equivalent layer in the following order: (``easting``, ``northing``,
            ``upward``).
        dtype : str or numpy dtype
            The type of the Jacobian array.

        Returns
        -------
        jacobian : 2D array
            The (n_data, n_points) Jacobian matrix.
        """
        n_data = coordinates[0].size
        n_points = points[0].size
        jac = np.zeros((n_data, n_points), dtype=dtype)
        jacobian_numba(coordinates, points, jac)
        return jac
||
|
||
@jit(nopython=True)
def predict_numba(coordinates, points, coeffs, result):
    """
    Accumulate the predicted data values in-place (numba-compiled for speed).
    """
    east, north, upward = coordinates[:]
    point_east, point_north, point_upward = points[:]
    n_data = east.size
    n_sources = point_east.size
    for data_index in range(n_data):
        for source_index in range(n_sources):
            # Superpose the effect of every source, weighted by its coefficient
            result[data_index] += coeffs[source_index] * greens_func(
                east[data_index],
                north[data_index],
                upward[data_index],
                point_east[source_index],
                point_north[source_index],
                point_upward[source_index],
            )
|
||
|
||
@jit(nopython=True)
def greens_func(east, north, upward, point_east, point_north, point_upward):
    """
    Evaluate the 1/r Green's function of the layer (numba-compiled for speed).
    """
    observation = (east, north, upward)
    source = (point_east, point_north, point_upward)
    return 1 / distance_cartesian(observation, source)
|
||
|
||
@jit(nopython=True)
def jacobian_numba(coordinates, points, jac):
    """
    Fill the Jacobian matrix in-place (numba-compiled for speed).
    """
    east, north, upward = coordinates[:]
    point_east, point_north, point_upward = points[:]
    n_data = east.size
    n_sources = point_east.size
    for data_index in range(n_data):
        for source_index in range(n_sources):
            # One Green's function evaluation per (data, source) pair
            jac[data_index, source_index] = greens_func(
                east[data_index],
                north[data_index],
                upward[data_index],
                point_east[source_index],
                point_north[source_index],
                point_upward[source_index],
            )
Oops, something went wrong.