Latest Threat Research: SANDWORM_MODE: Shai-Hulud-Style npm Worm Hijacks CI Workflows and Poisons AI Toolchains. Details
Socket
Book a DemoSign in
Socket

impdar

Package Overview
Dependencies
Maintainers
1
Versions
26
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

impdar - pypi Package Compare versions

Comparing version
1.1.4.post3
to
1.1.4.post5
+173
impdar/lib/ApresData/__init__.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2019 David Lilien <dlilien90@gmail.com>
#
# Distributed under terms of the GNU GPL-3.0 license.
"""
An alternative ImpDAR class for ApRES data.
This should be considered separate from impulse data.
This class has a different set of loading and filtering scripts.
Author:
Benjamin Hills
bhills@uw.edu
University of Washington
Earth and Space Sciences
Sept 24 2019
"""
import datetime
import numpy as np
from scipy.io import loadmat
from .ApresFlags import ApresFlags
from .ApresHeader import ApresHeader
from ..ImpdarError import ImpdarError
class ApresData(object):
    """A class that holds the relevant information for an ApRES acquisition.

    We keep track of processing steps with the flags attribute.
    This base version's __init__ takes a filename of a .mat file in the old
    StODeep format to load.
    """
    #: Attributes that every ApresData object should have and should not be None.
    attrs_guaranteed = ['data',
                        'decday',
                        'dt',
                        'snum',
                        'cnum',
                        'bnum',
                        'chirp_num',
                        'chirp_att',
                        'chirp_time',
                        'travel_time',
                        'frequencies']

    #: Optional attributes that may be None without affecting processing.
    #: These may not have existed in old StoDeep files that we are compatible with,
    #: and they often cannot be set at the initial data load.
    #: If they exist, they all have units of meters.
    attrs_optional = ['lat',
                      'long',
                      'x_coord',
                      'y_coord',
                      'elev',
                      'temperature1',
                      'temperature2',
                      'battery_voltage']

    # TODO: add imports
    #from ._ApresDataProcessing import
    #from ._ApresDataSaving import

    # Now make some load/save methods that will work with the matlab format
    def __init__(self, fn_mat):
        """Initialize the acquisition, empty or from a .mat file.

        Parameters
        ----------
        fn_mat: str or None
            Filename of a .mat file in the old StODeep format to load.
            If None, every attribute is initialized to None so that a
            loader (e.g. load_apres) can fill the object in afterwards.
        """
        if fn_mat is None:
            # Write these out so we can document them
            # Very basics
            self.snum = None  #: int number of samples per chirp
            self.cnum = None  #: int, the number of chirps in a burst
            self.bnum = None  #: int, the number of bursts
            self.data = None  #: np.ndarray(snum x tnum) of the actual return power
            self.dt = None  #: float, The spacing between samples in travel time, in seconds

            # Per-trace attributes
            #: np.ndarray(tnum,) of the acquisition time of each trace
            #: note that this is referenced to Jan 1, 0 CE (matlabe datenum)
            #: for convenience, use the `datetime` attribute to access a python version of the day
            self.decday = None
            #: np.ndarray(tnum,) latitude along the profile. Generally not in projected coordinates
            self.lat = None
            #: np.ndarray(tnum,) longitude along the profile. Generally not in projected coords.
            self.long = None

            # chirp
            self.chirp_num = None  #: np.ndarray(cnum,) The 1-indexed number of the chirp
            self.chirp_att = None  #: np.ndarray(cnum,) Chirp attenuation settings
            self.chirp_time = None  #: np.ndarray(cnum,) Time at beginning of chirp (serial day)

            # Sample-wise attributes
            #: np.ndarray(snum,) The two way travel time to each sample, in us
            self.travel_time = None

            #: np.ndarray(tnum,) Optional. Projected x-coordinate along the profile.
            self.x_coord = None
            #: np.ndarray(tnum,) Optional. Projected y-coordinate along the profile.
            self.y_coord = None
            #: np.ndarray(tnum,) Optional. Elevation along the profile.
            self.elev = None

            # Special attributes
            #: impdar.lib.RadarFlags object containing information about the processing steps done.
            self.flags = ApresFlags()
            self.header = ApresHeader()

            self.data_dtype = None
            return

        # TODO: add a matlab load
        mat = loadmat(fn_mat)
        # Required attributes: squeeze matlab's always-2D arrays down to a
        # scalar or a 1-D vector where the shape allows it.
        for attr in self.attrs_guaranteed:
            if mat[attr].shape == (1, 1):
                setattr(self, attr, mat[attr][0][0])
            elif mat[attr].shape[0] == 1 or mat[attr].shape[1] == 1:
                setattr(self, attr, mat[attr].flatten())
            else:
                setattr(self, attr, mat[attr])
        # We may have some additional variables
        for attr in self.attrs_optional:
            if attr in mat:
                if mat[attr].shape == (1, 1):
                    setattr(self, attr, mat[attr][0][0])
                elif mat[attr].shape[0] == 1 or mat[attr].shape[1] == 1:
                    setattr(self, attr, mat[attr].flatten())
                else:
                    setattr(self, attr, mat[attr])
            else:
                # Optional attributes must exist, even if only as None.
                setattr(self, attr, None)

        self.data_dtype = self.data.dtype
        self.fn = fn_mat
        self.flags = ApresFlags()
        self.header = ApresHeader()
        self.flags.from_matlab(mat['flags'])
        self.check_attrs()

    def check_attrs(self):
        """Check if required attributes exist.

        This is largely for development only; loaders should generally call
        this method last, so that they can confirm that they have defined the
        necessary attributes.

        Raises
        ------
        ImpdarError
            If any required attribute is None or any optional attribute is fully absent
        """
        for attr in self.attrs_guaranteed:
            if not hasattr(self, attr):
                raise ImpdarError('{:s} is missing. \
It appears that this is an ill-defined RadarData object'.format(attr))
            if getattr(self, attr) is None:
                raise ImpdarError('{:s} is None. \
It appears that this is an ill-defined RadarData object'.format(attr))
        # Optional attributes must at least be present (they may be None).
        for attr in self.attrs_optional:
            if not hasattr(self, attr):
                raise ImpdarError('{:s} is missing. \
It appears that this is an ill-defined RadarData object'.format(attr))
        # Backfill data_dtype in case a loader forgot to set it.
        if not hasattr(self, 'data_dtype') or self.data_dtype is None:
            self.data_dtype = self.data.dtype
        return

    @property
    def datetime(self):
        """A python operable version of the time of acquisition of each trace"""
        # decday is a matlab serial datenum (days since Jan 1, 0 CE); the
        # 366-day subtraction converts it to the python ordinal epoch.
        return np.array([datetime.datetime.fromordinal(int(dd)) + datetime.timedelta(days=dd % 1) - datetime.timedelta(days=366)
                         for dd in self.decday], dtype=np.datetime64)
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2019 David Lilien <dlilien90@gmail.com>
#
# Distributed under terms of the GNU GPL3 license.
"""
Process ApRES data
Author:
Benjamin Hills
bhills@uw.edu
University of Washington
Earth and Space Sciences
Sept 23 2019
"""
import numpy as np
def apres_range(self, p, max_range=4000, winfun='blackman'):
    """Convert raw ApRES chirp voltages to a range-resolved complex spectrum.

    Phase sensitive processing of FMCW radar data based on Brennan et al. 2013.
    Based on Paul's scripts but following the nomenclature of:
    "Phase-sensitive FMCW radar imaging system for high precision Antarctic
    ice shelf profile monitoring", Brennan, Lok, Nicholls and Corr, 2013.
    Ported from Craig Stewart's Matlab scripts (2013 April 24).

    Parameters
    ----------
    self: class
        data object
    p: int
        pad factor, level of interpolation for fft
    max_range: float, optional
        maximum depth (m) kept in the output (default 4000)
    winfun: str
        window function for fft; one of 'blackman', 'bartlett',
        'hamming', 'hanning', 'kaiser'

    Output
    ------
    Rcoarse: array
        range to bin centres (m)
    Rfine: array
        range to reflector from bin centre (m)
    spec_cor: array
        positive frequency half of the spectrum with the reference phase
        subtracted. This is the complex signal which can be used for
        cross-correlating two shot segments.
    """
    if self.flags.range != 0:
        raise TypeError('The range filter has already been done on these data.')

    # Number of positive frequencies recovered after padding by a factor p.
    nf = int(np.floor(p * self.snum / 2))

    # Taper applied to each chirp before the FFT.
    if winfun not in ['blackman', 'bartlett', 'hamming', 'hanning', 'kaiser']:
        raise TypeError('Window must be in: blackman, bartlett, hamming, hanning, kaiser')
    if winfun == 'kaiser':
        # BUG FIX: np.kaiser requires a shape parameter; the original call
        # omitted it and raised a TypeError whenever winfun='kaiser'.
        # beta=14 gives sidelobe suppression comparable to a Blackman window.
        win = np.kaiser(self.snum, 14.)
    else:
        # blackman/bartlett/hamming/hanning all share the one-argument form.
        win = getattr(np, winfun)(self.snum)

    # Round-trip delay, Brennan et al. (2014) eq. 18.
    tau = np.arange(nf) / (self.header.bandwidth * p)
    # Coarse range to each bin centre.
    self.Rcoarse = tau * self.header.ci / 2.
    # Phase of each range-bin centre for correction,
    # Brennan et al. (2014) eq. 17 measured at t=T/2.
    self.phiref = 2. * np.pi * self.header.fc * tau - (self.header.chirp_grad * tau**2.) / 2

    # Unit phasor with the conjugate of the reference phase. This is
    # loop-invariant, so compute it once rather than once per chirp.
    comp = np.exp(-1j * self.phiref)

    # --- Loop through for each chirp in burst --- #
    # preallocate
    spec = np.zeros((self.bnum, self.cnum, nf)).astype(np.cdouble)
    spec_cor = np.zeros((self.bnum, self.cnum, nf)).astype(np.cdouble)
    for ib in range(self.bnum):
        for ic in range(self.cnum):
            # isolate the chirp and preprocess before transform
            chirp = self.data[ib, ic, :].copy()
            chirp = chirp - np.mean(chirp)  # de-mean
            chirp *= win  # windowed
            # fft, scaled for the padding and for the rms of the window
            fft_chirp = (np.sqrt(2. * p) / len(chirp)) * np.fft.fft(chirp, p * self.snum)
            fft_chirp /= np.sqrt(np.mean(win**2.))
            # positive frequency half of spectrum up to (nyquist minus deltaf),
            # raw and with the reference phase subtracted
            spec[ib, ic, :] = fft_chirp[:nf]
            spec_cor[ib, ic, :] = comp * fft_chirp[:nf]

    self.data = spec_cor.copy()
    self.spec = spec.copy()

    # precise range measurement
    self.Rfine = phase2range(np.angle(self.data), self.header.lambdac,
                             np.tile(self.Rcoarse, (self.bnum, self.cnum, 1)),
                             self.header.chirp_grad, self.header.ci)

    # Crop output variables to useful depth range only
    n = np.argmin(self.Rcoarse <= max_range)
    self.Rcoarse = self.Rcoarse[:n]
    self.Rfine = self.Rfine[:, :, :n]
    self.data = self.data[:, :, :n]
    self.spec = self.spec[:, :, :n]
    self.snum = n

    self.flags.range = max_range
# --------------------------------------------------------------------------------------------
def phase_uncertainty(self):
    """Calculate the phase uncertainty using a noise phasor.

    Following Kingslake et al. (2014): a synthetic phasor with random phase
    and median magnitude is superimposed on the measured phasor; its
    orthogonal component bounds the phase error.

    Output
    ------
    phase_uncertainty: array
        uncertainty in the phase (rad)
    r_uncertainty: array
        uncertainty in the range (m) calculated from phase uncertainty
    """
    if self.flags.range == 0:
        raise TypeError('The range filter has not been executed on this data class, do that before the uncertainty calculation.')

    # Measured phasor from the data class; the noise phasor magnitude is
    # the median magnitude of the measurement.
    phasor = self.data
    noise_mag = np.nanmedian(abs(phasor))

    # Noise phasor: random phase, magnitude equal to the median above.
    rand_phase = np.random.uniform(-np.pi, np.pi, np.shape(phasor))
    rand_phasor = noise_mag * (np.cos(rand_phase) + 1j * np.sin(rand_phase))

    # Component of the noise phasor perpendicular to the reflector phasor —
    # this is the part that perturbs the measured phase.
    noise_orth = noise_mag * np.sin(np.angle(phasor) - np.angle(rand_phasor))
    phase_uncertainty = np.abs(np.arcsin(noise_orth / np.abs(phasor)))

    # Convert the phase uncertainty to a range uncertainty.
    r_uncertainty = phase2range(phase_uncertainty,
                                self.header.lambdac,
                                self.Rcoarse,
                                self.header.chirp_grad,
                                self.header.ci)

    return phase_uncertainty, r_uncertainty
# --------------------------------------------------------------------------------------------
def phase2range(phi, lambdac, rc=None, K=None, ci=None):
    """Convert a phase difference to a range for FMCW radar.

    Parameters
    ----------
    phi: float or array
        phase (radians), must be of spectrum after bin center correction
    lambdac: float
        wavelength (m) at center frequency
    rc: float; optional
        coarse range of bin center (m)
    K: float; optional
        chirp gradient (rad/s/s)
    ci: float; optional
        propagation velocity (m/s)

    Output
    ------
    r: float or array
        range (m)

    ### Original Matlab File Notes ###
    Craig Stewart, 2014/6/10
    """
    # The precise form needs the bin-centre range plus both chirp
    # parameters; otherwise fall back to the first-order approximation.
    use_precise = rc is not None and all([K, ci])
    if use_precise:
        return phi / ((4. * np.pi / lambdac) - (4. * rc * K / ci**2.))
    # First order method, Brennan et al. (2014) eq 15
    return lambdac * phi / (4. * np.pi)
# --------------------------------------------------------------------------------------------
def range_diff(self, acq1, acq2, win, step, Rcoarse=None, r_uncertainty=None, uncertainty='CR'):
    """Calculate the vertical motion using a correlation coefficient.

    Parameters
    ----------
    self: class
        data object
    acq1: array
        first acquisition for comparison
    acq2: array
        second acquisition for comparison
    win: int
        window size over which to do the correlation coefficient calculation
    step: int
        step size for the window to move between calculations
    Rcoarse: array; optional
        if an external depth array is desired, input here
    r_uncertainty: array; optional
        if uncertainty based on the noise vector is desired, input value here;
        this should be the sum of uncertainty from both acquisitions.
    uncertainty: string; optional
        'CR' (default): Cramer-Rao bound as in Jordan et al. (2020);
        'noise_phasor': noise-phasor uncertainty as in Kingslake et al. (2014)

    Output
    ------
    ds: array
        depths at which the correlation coefficient is calculated
    co: array
        complex correlation coefficient between acquisitions;
        amplitude indicates how well reflection packets match between
        acquisitions, phase is a measure of the vertical motion
    r_diff: array
        vertical motion in meters
    r_diff_unc: array
        uncertainty in the vertical motion (m)
    """
    if np.shape(acq1) != np.shape(acq2):
        raise TypeError('Acquisition inputs must be of the same shape.')

    # Centres of the correlation windows.
    idxs = np.arange(win // 2, (len(acq1) - win // 2), step)
    if Rcoarse is not None:
        ds = Rcoarse[idxs]
    else:
        ds = self.Rcoarse[idxs]

    # BUG FIX: np.complex was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin complex is the documented replacement.
    co = np.empty_like(ds).astype(complex)
    for i, idx in enumerate(idxs):
        # index two sub-arrays to compare
        arr1 = acq1[idx - win // 2:idx + win // 2]
        arr2 = acq2[idx - win // 2:idx + win // 2]
        # correlation coefficient to get the motion:
        # the amplitude indicates how well the reflections match between
        # acquisitions; the phase is a measure of the offset
        co[i] = np.corrcoef(arr1, arr2)[1, 0]

    # convert the phase offset to a distance vector
    r_diff = phase2range(np.angle(co),
                         self.header.lambdac,
                         ds,
                         self.header.chirp_grad,
                         self.header.ci)

    if uncertainty == 'CR':
        # Error from Cramer-Rao bound, Jordan et al. (2020) Ann. Glac. eq. (5)
        sigma = (1. / abs(co)) * np.sqrt((1. - abs(co)**2.) / (2. * win))
        # convert the phase uncertainty to a distance vector
        r_diff_unc = phase2range(sigma,
                                 self.header.lambdac,
                                 ds,
                                 self.header.chirp_grad,
                                 self.header.ci)
    elif uncertainty == 'noise_phasor':
        # Uncertainty from noise phasor as in Kingslake et al. (2014);
        # r_uncertainty should be calculated with phase_uncertainty()
        # defined in this script.
        r_diff_unc = np.array([np.nanmean(r_uncertainty[i - win // 2:i + win // 2]) for i in idxs])
    else:
        # BUG FIX: an unrecognized option previously fell through and
        # raised NameError at the return; fail with a clear message.
        raise ValueError("uncertainty must be 'CR' or 'noise_phasor'")

    return ds, co, r_diff, r_diff_unc
# --------------------------------------------------------------------------------------------
def stacking(self, num_chirps=None):
    """Stack (average) chirps together to beat down the noise.

    Parameters
    ----------
    num_chirps: int, optional
        number of chirps to average over; defaults to all chirps in the
        record (cnum * bnum).
    """
    # BUG FIX: compare to None with 'is', not '=='; '==' can misbehave for
    # objects with custom equality and is non-idiomatic (PEP 8).
    if num_chirps is None:
        num_chirps = self.cnum * self.bnum
    num_chirps = int(num_chirps)

    if num_chirps == self.cnum:
        # Average all chirps within each burst, keeping bursts separate.
        self.data = np.reshape(np.mean(self.data, axis=1), (self.bnum, 1, self.snum))
        self.cnum = 1
    else:
        # Reshape to jump across bursts, then average only the first
        # num_chirps chirps of the whole record.
        data_hold = np.reshape(self.data, (1, self.cnum * self.bnum, self.snum))
        data_hold = data_hold[:, :num_chirps, :]
        self.data = np.array([np.mean(data_hold, axis=1)])
        self.bnum = 1
        self.cnum = 1

    # Record how many chirps went into the stack.
    self.flags.stack = num_chirps
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2019 dlilien <dlilien@berens>
#
# Distributed under terms of the MIT license.
"""
"""
import numpy as np
from scipy.io import savemat
from .ApresFlags import ApresFlags
from .ApresHeader import ApresHeader
def save_apres(self, fn):
    """Save the radar data to a .mat file.

    Parameters
    ----------
    fn: str
        Filename. Should have a .mat extension
    """
    mat = {}

    # Required attributes: write a 0 placeholder when unset, since savemat
    # cannot serialize None.
    for attr in self.attrs_guaranteed:
        if getattr(self, attr) is not None:
            mat[attr] = getattr(self, attr)
        else:
            # this guards against error in matlab format
            mat[attr] = 0
    # Optional attributes are only written when present and not None.
    for attr in self.attrs_optional:
        if hasattr(self, attr) and getattr(self, attr) is not None:
            mat[attr] = getattr(self, attr)

    if self.flags is not None:
        mat['flags'] = self.flags.to_matlab()
    else:
        # We want the structure available to prevent read errors from corrupt files
        mat['flags'] = ApresFlags().to_matlab()
    if self.header is not None:
        mat['header'] = self.header.to_matlab()
    else:
        # We want the structure available to prevent read errors from corrupt files
        mat['header'] = ApresHeader().to_matlab()

    # Make sure not to expand the size of the data due to type conversion:
    # cast back to the recorded dtype unless NaNs force a float type.
    if hasattr(self, 'data_dtype') and self.data_dtype is not None and self.data_dtype != mat['data'].dtype:
        # Be careful of obliterating NaNs:
        # we will use floats of matching width instead of ints in that case.
        if (self.data_dtype in [int, np.int8, np.int16]) and np.any(np.isnan(mat['data'])):
            print('Warning: new file is float16 rather than ', self.data_dtype, ' since we now have NaNs')
            mat['data'] = mat['data'].astype(np.float16)
        elif (self.data_dtype in [np.int32]) and np.any(np.isnan(mat['data'])):
            print('Warning: new file is float32 rather than ', self.data_dtype, ' since we now have NaNs')
            mat['data'] = mat['data'].astype(np.float32)
        elif (self.data_dtype in [np.int64]) and np.any(np.isnan(mat['data'])):
            print('Warning: new file is float64 rather than ', self.data_dtype, ' since we now have NaNs')
            mat['data'] = mat['data'].astype(np.float64)
        else:
            mat['data'] = mat['data'].astype(self.data_dtype)
    savemat(fn, mat)
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2019 David Lilien <dlilien90@gmail.com>
#
# Distributed under terms of the GNU GPL3.0 license.
"""
Flags to keep track of processing steps
"""
import numpy as np
class ApresFlags():
    """Flags that indicate the processing that has been used on ApRES data.

    These are used for figuring out whether different processing steps have
    been performed, and carry some of the input arguments of those steps.

    Attributes
    ----------
    file_read_code: str or None
        Information recorded by the loaders about how the file was read.
    range: float
        0 until apres_range() has been run; afterwards, the max_range used.
    stack: int
        Number of chirps averaged together by stacking() (1 if unstacked).
    """

    def __init__(self):
        self.file_read_code = None
        self.range = 0
        self.stack = 1
        # BUG FIX: this list previously contained 'phase2range', which is
        # not an attribute of this class (the processing flag actually set
        # by apres_range() is 'range'), so to_matlab() always raised
        # AttributeError.
        self.attrs = ['file_read_code', 'range', 'stack']
        self.attr_dims = [None, None, None]

    def to_matlab(self):
        """Convert all associated attributes into a dictionary formatted for use with :func:`scipy.io.savemat`"""
        outmat = {att: getattr(self, att) for att in self.attrs}
        return outmat

    def from_matlab(self, matlab_struct):
        """Associate all values from an incoming .mat file (i.e. a dictionary from :func:`scipy.io.loadmat`) with appropriate attributes"""
        for attr, attr_dim in zip(self.attrs, self.attr_dims):
            setattr(self, attr, matlab_struct[attr][0][0][0])
            # Use this because matlab inputs may have zeros for flags that
            # were lazily appended to be arrays, but we preallocate
            if attr_dim is not None and getattr(self, attr).shape[0] == 1:
                setattr(self, attr, np.zeros((attr_dim, )))
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2019 David Lilien <dlilien90@gmail.com>
#
# Distributed under terms of the GNU GPL3 license.
"""
Header for ApRES data
This code is based on a series of Matlab scripts from Craig Stewart,
Keith Nicholls, and others.
The ApRES (Automated phase-sensitive Radio Echo Sounder) is a self-contained
instrument from BAS.
Author:
Benjamin Hills
bhills@uw.edu
University of Washington
Earth and Space Sciences
Sept 23 2019
"""
import numpy as np
import re
# --------------------------------------------------------------------------------------------
class ApresHeader():
    """Class for parameters from the ApRES header file.

    The burst header is an ini-style text block at the top of each raw file;
    the DDS radar settings are stored inside it as quoted hex register
    values (e.g. Reg0B="..."). This class reads that text and decodes the
    acquisition parameters from it.
    """

    def __init__(self):
        """Initialize data parameters to defaults/None."""
        self.fsysclk = 1e9  # DDS system clock (Hz), used to decode register values
        self.fs = 4e4  # sampling frequency (Hz); updated by update_parameters()
        self.fn = None  # name of the file whose header was read
        self.header_string = None  # raw header text (str of the leading bytes)
        self.file_format = None  # integer format code, see get_file_format()
        self.noDwellHigh = None  # DDS CFR2 no-dwell-high bit
        self.noDwellLow = None  # DDS CFR2 no-dwell-low bit
        self.f0 = None  # chirp start frequency (Hz)
        self.f_stop = None  # chirp stop frequency (Hz)
        self.ramp_up_step = None  # DDS frequency increment (Hz)
        self.ramp_down_step = None  # DDS frequency decrement (Hz)
        self.tstep_up = None  # time between DDS increments (s)
        self.tstep_down = None  # time between DDS decrements (s)
        self.snum = None  # number of ADC samples per chirp
        self.nsteps_DDS = None  # number of DDS steps in one chirp
        self.chirp_length = None  # chirp duration (s)
        self.chirp_grad = None  # chirp gradient (rad/s/s)
        self.nchirp_samples = None  # samples needed to capture a full chirp
        self.ramp_dir = None  # 'up', 'down', or 'upDown'

    # --------------------------------------------------------------------------------------------
    def read_header(self, fn_apres, max_header_len=2000):
        """Read the header string, to be partitioned later.

        Parameters
        ----------
        fn_apres: string
            file name to update with
        max_header_len: int
            maximum length of header to read (can be too long)
        """
        self.fn = fn_apres
        fid = open(fn_apres, 'rb')
        # str() of the raw bytes: newlines therefore appear as the literal
        # two-character sequence '\n', which the parsers below rely on.
        self.header_string = str(fid.read(max_header_len))
        fid.close()

    # --------------------------------------------------------------------------------------------
    def get_file_format(self):
        """Determine fmcw file format from burst header using keyword presence.

        There are a few different formats through the years.

        ### Original Matlab script Notes ###
        Craig Stewart, 2013-10-20
        Updated by Keith Nicholls, 2014-10-22: RMB2
        """
        if 'SW_Issue=' in self.header_string:  # Data from RMB2 after Oct 2014
            self.file_format = 5
        elif 'SubBursts in burst:' in self.header_string:  # Data from after Oct 2013
            self.file_format = 4
        elif '*** Burst Header ***' in self.header_string:  # Data from Jan 2013
            self.file_format = 3
        elif 'RADAR TIME' in self.header_string:  # Data from Prototype FMCW radar (nov 2012)
            self.file_format = 2
        else:
            raise TypeError('Unknown file format - check file')

    def update_parameters(self, fn_apres=None):
        """Update the parameters with the apres file header.

        ### Original Matlab Notes ###
        Extract from the hex codes the actual paramaters used by RMB2.
        The contents of config.ini are copied into a data header.
        Note this script assumes that the format of the hex codes have quotes
        e.g. Reg02="0D1F41C8"

        Checks for a sampling frequency of 40 or 80 KHz. Apart from Lai Bun's
        variant (WDC + Greenland) it could be hard coded to 40 KHz.
        However, there is no check made by the system that the N_ADC_SAMPLES
        matches the requested chirp length.

        NOT COMPLETE - needs a means of checking for profile mode, where
        multiple sweeps per period are transmitted - see last line.
        """
        if self.header_string is None:
            if fn_apres is None:
                raise TypeError('Must input file name if the header has not been read yet.')
            else:
                self.read_header(fn_apres)
        if self.file_format is None:
            self.get_file_format()

        # Pair each register name ('Reg0X') with its quoted hex value:
        # loc1 marks the names, loc2 the '="' that starts each value.
        loc1 = [m.start() for m in re.finditer('Reg0', self.header_string)]
        loc2 = [m.start() for m in re.finditer('="', self.header_string)]

        for k in range(len(loc1)):
            case = self.header_string[loc1[k]:loc2[k]]
            if case == 'Reg01':
                # Control Function Register 2 (CFR2) Address 0x01 Four bytes
                # Bit 19 (Digital ramp enable) = 1 enables the digital ramp generator.
                # Bit 18 (Digital ramp no-dwell high) = 1 enables no-dwell high.
                # Bit 17 (Digital ramp no-dwell low) = 1 enables no-dwell low.
                # Setting both no-dwell bits invokes a continuous ramping mode.
                loc3 = self.header_string[loc2[k] + 2:].find('"')
                val = self.header_string[loc2[k] + 2:loc2[k] + loc3 + 2]
                # Reverse the binary string so that index == bit number.
                val = bin(int(val, 16))
                val = val[::-1]
                self.noDwellHigh = int(val[18])
                self.noDwellLow = int(val[17])
            elif case == 'Reg0B':
                # Digital Ramp Limit Register Address 0x0B:
                # upper 32 bits = ramp upper limit, lower 32 bits = ramp lower limit.
                loc3 = self.header_string[loc2[k] + 2:].find('"')
                val = self.header_string[loc2[k] + 2:loc2[k] + loc3 + 2]
                self.f0 = int(val[8:], 16) * self.fsysclk / (2**32)
                self.f_stop = int(val[:8], 16) * self.fsysclk / (2**32)
            elif case == 'Reg0C':
                # Digital Ramp Step Size Register Address 0x0C:
                # 32-bit decrement and increment step size values.
                loc3 = self.header_string[loc2[k] + 2:].find('"')
                val = self.header_string[loc2[k] + 2:loc2[k] + loc3 + 2]
                self.ramp_up_step = int(val[8:], 16) * self.fsysclk / (2**32)
                self.ramp_down_step = int(val[:8], 16) * self.fsysclk / (2**32)
            elif case == 'Reg0D':
                # Digital Ramp Rate Register Address 0x0D:
                # 16-bit negative/positive slope rates defining the time
                # interval between decrement/increment values.
                loc3 = self.header_string[loc2[k] + 2:].find('"')
                val = self.header_string[loc2[k] + 2:loc2[k] + loc3 + 2]
                self.tstep_up = int(val[4:], 16) * 4 / self.fsysclk
                self.tstep_down = int(val[:4], 16) * 4 / self.fsysclk

        # Pull the plain-text (non-register) settings out of the header.
        strings = ['SamplingFreqMode=', 'N_ADC_SAMPLES=']
        output = np.empty((len(strings))).astype(str)
        for i, string in enumerate(strings):
            if string in self.header_string:
                search_start = self.header_string.find(string)
                search_end = self.header_string[search_start:].find('\\')
                output[i] = self.header_string[search_start + len(string):search_end + search_start]

        # BUG FIX: output[0] is a string; the original code compared it to
        # the int 1, which is always False, silently forcing fs = 40 kHz for
        # every file. Compare as a string so SamplingFreqMode=1 gives 80 kHz.
        if output[0] == '1':
            self.fs = 8e4
        else:
            self.fs = 4e4

        self.snum = int(output[1])

        self.nsteps_DDS = round(abs((self.f_stop - self.f0) / self.ramp_up_step))  # abs as ramp could be down
        self.chirp_length = int(self.nsteps_DDS * self.tstep_up)
        self.nchirp_samples = round(self.chirp_length * self.fs)

        # If number of ADC samples collected is less than required to collect
        # entire chirp, set chirp length to length of series actually collected
        if self.nchirp_samples > self.snum:
            self.chirp_length = self.snum / self.fs

        self.chirp_grad = 2. * np.pi * (self.ramp_up_step / self.tstep_up)  # chirp gradient (rad/s/s)
        if self.f_stop > 400e6:
            self.ramp_dir = 'down'
        else:
            self.ramp_dir = 'up'
        if self.noDwellHigh and self.noDwellLow:
            self.ramp_dir = 'upDown'
        self.nchirpsPerPeriod = np.nan  # self.nchirpSamples/(self.chirpLength)

    # --------------------------------------------------------------------------------------------
    def to_matlab(self):
        """Convert all associated attributes into a dictionary formatted for use with :func:`scipy.io.savemat`"""
        outmat = {att: getattr(self, att) for att in vars(self)}
        return outmat

    def from_matlab(self, matlab_struct):
        """Associate all values from an incoming .mat file (i.e. a dictionary from :func:`scipy.io.loadmat`) with appropriate attributes"""
        # NOTE(review): self.attrs and self.bool_attrs are never defined on
        # this class, so this method raises AttributeError as written — it
        # appears to have been copied from ApresFlags. Confirm the intended
        # attribute lists before relying on it.
        for attr, attr_dim in zip(self.attrs, self.attr_dims):
            setattr(self, attr, matlab_struct[attr][0][0][0])
            # Use this because matlab inputs may have zeros for flags that
            # were lazily appended to be arrays, but we preallocate
            if attr_dim is not None and getattr(self, attr).shape[0] == 1:
                setattr(self, attr, np.zeros((attr_dim, )))
        for attr in self.bool_attrs:
            setattr(self, attr, True if matlab_struct[attr][0][0][0] == 1 else 0)
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2019 David Lilien <dlilien90@gmail.com>
#
# Distributed under terms of the GNU GPL3 license.
"""
Load ApRES data
This code is based on a series of Matlab scripts from Craig Stewart,
Keith Nicholls, and others.
The ApRES (Automated phase-sensitive Radio Echo Sounder) is a self-contained
instrument from BAS.
Author:
Benjamin Hills
bhills@uw.edu
University of Washington
Earth and Space Sciences
Sept 23 2019
"""
import numpy as np
import datetime
import re
from . import ApresData
# -----------------------------------------------------------------------------------------------------
def load_apres(fns_apres, burst=1, fs=40000, *args, **kwargs):
    """Load and concatenate all apres data from several files.

    Parameters
    ----------
    fns_apres: list of file names for ApresData
        each loads object to concatenate
    burst: int
        number of bursts to load from each file
    fs: int
        sampling frequency

    Returns
    -------
    RadarData
        A single, concatenated output.

    Raises
    ------
    ValueError
        If no file loads, or if the loaded files are inconsistent in shape,
        travel time, or frequency.
    """
    import warnings
    from copy import deepcopy

    apres_data = []
    for fn in fns_apres:
        try:
            apres_data.append(load_apres_single_file(fn, burst=burst, fs=fs, *args, **kwargs))
        except Exception:
            # BUG FIX: the original used a bare except and instantiated
            # Warning(...) without raising or emitting it, so failures were
            # completely silent; actually warn, and don't swallow
            # KeyboardInterrupt/SystemExit.
            warnings.warn('Cannot load file: ' + fn)

    # BUG FIX: an empty list previously crashed below with an opaque
    # IndexError on apres_data[0].
    if not apres_data:
        raise ValueError('None of the input files could be loaded.')

    # Concatenate onto a copy of the first acquisition, checking that the
    # records are mutually consistent first.
    out = deepcopy(apres_data[0])
    for dat in apres_data[1:]:
        if out.snum != dat.snum:
            raise ValueError('Need the same number of vertical samples in each file')
        if out.cnum != dat.cnum:
            raise ValueError('Need the same number of chirps in each file')
        if not np.all(out.travel_time == dat.travel_time):
            raise ValueError('Need matching travel time vectors')
        if not np.all(out.frequencies == dat.frequencies):
            raise ValueError('Need matching frequency vectors')

    # Stack each file as one burst along the leading axis.
    out.data = np.vstack([[dat.data] for dat in apres_data])
    out.chirp_num = np.vstack([[dat.chirp_num] for dat in apres_data])
    out.chirp_att = np.vstack([[dat.chirp_att] for dat in apres_data])
    out.chirp_time = np.vstack([[dat.chirp_time] for dat in apres_data])
    out.time_stamp = np.hstack([dat.time_stamp for dat in apres_data])
    out.temperature1 = np.hstack([dat.temperature1 for dat in apres_data])
    out.temperature2 = np.hstack([dat.temperature2 for dat in apres_data])
    out.battery_voltage = np.hstack([dat.battery_voltage for dat in apres_data])
    out.bnum = np.shape(out.data)[0]

    return out
def load_apres_single_file(fn_apres, burst=1, fs=40000, *args, **kwargs):
    """
    Load ApRES data from a single file.
    This function calls the load_burst function below.

    Parameters
    ----------
    fn_apres: string
        file name
    burst: int
        number of bursts to load
    fs: int
        sampling frequency

    ### Original Matlab Notes ###
    Craig Stewart
    2013 April 24
    2013 September 30 - corrected error in vif scaling
    2014/5/20 time stamp moved here from fmcw_derive_parameters (so that this
    is not overwritted later)
    2014/5/21 changed how radar chirp is defined (now using chirp gradient as
    fundamental parameter)
    2014/5/22 fixed bug in chirptime
    2014/8/21 moved make odd length to external (called from fmcw_range)
    2014/10/22 KWN - edited to allow for new headers in RMB2 files
    """
    ## Load data and reshape array
    if fn_apres[-4:] == '.mat':
        # TODO: fix this in the __init__ file
        apres_data = ApresData(fn_apres)
    else:
        apres_data = ApresData(None)
        apres_data.header.update_parameters(fn_apres)
        start_ind, end_ind = load_burst(apres_data, burst, fs)

    # Extract just good chirp data from voltage record and rearrange into
    # matrix with one chirp per row
    # note: you can't just use reshape as we are also cropping the 20K samples
    # of sync tone etc which occur after each 40K of chirp.
    # NOTE(review): attenuator1/attenuator2 appear to be set by load_burst
    # (not visible here) — confirm they exist on the .mat path as well.
    AttSet = apres_data.header.attenuator1 + 1j*apres_data.header.attenuator2  # unique code for attenuator setting

    ## Add metadata to structure
    # Sampling parameters
    if apres_data.header.file_format is None:
        raise TypeError("File format is 'None', cannot load")
    else:
        if apres_data.header.file_format != 5:
            raise TypeError('Loading functions have only been written for rmb5 data.\
Look back to the original Matlab scripts if you need to implement earlier formats.')
        else:
            # f1 is the chirp stop frequency implied by length and gradient.
            apres_data.header.f1 = apres_data.header.f0 + apres_data.header.chirp_length * apres_data.header.chirp_grad/2./np.pi

    apres_data.header.bandwidth = apres_data.header.chirp_length * apres_data.header.chirp_grad/2/np.pi
    apres_data.header.fc = apres_data.header.f0 + apres_data.header.bandwidth/2.  # center frequency
    apres_data.dt = 1./apres_data.header.fs
    apres_data.header.er = 3.18  # relative permittivity of ice
    apres_data.header.ci = 3e8/np.sqrt(apres_data.header.er);  # propagation velocity in ice
    apres_data.header.lambdac = apres_data.header.ci/apres_data.header.fc;  # wavelength at center frequency

    # Load each chirp into a row
    data_load = np.zeros((apres_data.cnum, apres_data.snum))  # preallocate array
    apres_data.chirp_num = np.arange(apres_data.cnum)
    apres_data.chirp_att = np.zeros((apres_data.cnum)).astype(np.cdouble)
    apres_data.chirp_time = np.zeros((apres_data.cnum))
    chirp_interval = 1.6384/(24.*3600.);  # days TODO: why is this assigned directly?
    for chirp in range(apres_data.cnum):
        data_load[chirp, :] = apres_data.data[start_ind[chirp]:end_ind[chirp]]
        apres_data.chirp_att[chirp] = AttSet[chirp//apres_data.cnum]  # attenuator setting for chirp
        # NOTE(review): (chirp-1) with this 0-based loop offsets the first
        # chirp by -chirp_interval; the Matlab original was 1-indexed —
        # confirm whether this off-by-one is intended.
        apres_data.chirp_time[chirp] = apres_data.decday + chirp_interval*(chirp-1)  # time of chirp
    apres_data.data = data_load

    # Create time and frequency stamp for samples
    apres_data.travel_time = apres_data.dt*np.arange(apres_data.snum)  # sampling times (rel to first)
    apres_data.frequencies = apres_data.header.f0 + apres_data.travel_time*apres_data.header.chirp_grad/(2.*np.pi)
    apres_data.travel_time *= 1e6  # convert to microseconds

    apres_data.data_dtype = apres_data.data.dtype

    return apres_data
# -----------------------------------------------------------------------------------------------------
def load_burst(self, burst=1, fs=40000, max_header_len=2000, burst_pointer=0):
    """Load bursts from the apres acquisition.

    Normally, this should be called from the load_apres function rather
    than directly.

    Parameters
    ----------
    burst: int
        number of bursts to load
    fs: int
        sampling frequency
    max_header_len: int
        maximum length to read for header (can be too long)
    burst_pointer: int
        byte offset in the file at which to start reading bursts

    Returns
    -------
    start_ind: np.ndarray
        index of the first sample of each chirp within self.data
    end_ind: np.ndarray
        index one past the last sample of each chirp within self.data

    Raises
    ------
    TypeError
        if the header has not been read, the file format is not rmb5,
        the file cannot be opened, a burst header is corrupt, or fewer
        bursts than requested are found.

    ### Original Matlab Script Notes ###
    Read FMCW data file from after Oct 2014 (RMB2b + VAB Iss C, SW Issue >= 101)
    Corrected so that Sampling Frequency has correct use (ie, not used in
    this case)
    """
    if self.header.fn is None:
        raise TypeError('Read in the header before loading data.')
    if self.header.file_format != 5:
        raise TypeError('Loading functions have only been written for rmb5 data.\
                Look back to the original Matlab scripts if you need to implement earlier formats.')
    try:
        fid = open(self.header.fn, 'rb')
    except OSError:
        # Record the failure in the flags before raising so partially
        # constructed objects carry a diagnostic.
        self.flags.file_read_code = 'Unable to read file' + self.header.fn
        raise TypeError('Cannot open file', self.header.fn)

    # Get the total length of the file (seek to end, then tell)
    fid.seek(0, 2)
    file_len = fid.tell()
    burst_count = 1

    # --- Read bursts in a loop --- #
    while burst_count <= burst and burst_pointer <= file_len - max_header_len:
        # Go to burst pointer and read the header for the burst
        fid.seek(burst_pointer)
        self.header.read_header(self.header.fn, max_header_len)
        try:
            # Read header values: each key is searched for in the raw
            # header string and the value up to the next '\' is kept.
            strings = ['N_ADC_SAMPLES=', 'NSubBursts=', 'Average=', 'nAttenuators=', 'Attenuator1=',
                       'AFGain=', 'TxAnt=', 'RxAnt=']
            output = np.empty((len(strings))).astype(str)
            for i, string in enumerate(strings):
                if string in self.header.header_string:
                    search_start = self.header.header_string.find(string)
                    search_end = self.header.header_string[search_start:].find('\\')
                    output[i] = self.header.header_string[search_start + len(string):search_end + search_start]
            # Write header values to data object
            self.snum = int(output[0])
            self.n_subbursts = int(output[1])
            self.average = int(output[2])
            self.header.n_attenuators = int(output[3])
            self.header.attenuator1 = np.array(output[4].split(',')).astype(int)[:self.header.n_attenuators]
            self.header.attenuator2 = np.array(output[5].split(',')).astype(int)[:self.header.n_attenuators]
            self.header.tx_ant = np.array(output[6].split(',')).astype(int)
            self.header.rx_ant = np.array(output[7].split(',')).astype(int)
            # Keep only the antennas flagged active (value 1) in the header
            self.header.tx_ant = self.header.tx_ant[self.header.tx_ant == 1]
            self.header.rx_ant = self.header.rx_ant[self.header.rx_ant == 1]
            if self.average != 0:
                # Averaged acquisitions store a single stacked chirp
                self.cnum = 1
            else:
                self.cnum = self.n_subbursts*len(self.header.tx_ant)*\
                            len(self.header.rx_ant)*self.header.n_attenuators
            # End of burst: advance the pointer past the header sentinel
            search_string = '*** End Header ***'
            search_ind = self.header.header_string.find(search_string)
            burst_pointer += search_ind + len(search_string)
        except Exception:
            # If the burst read is unsuccessful exit with an updated read code.
            # (Was a bare except; narrowed so Ctrl-C etc. still propagate.)
            self.flags.file_read_code = 'Corrupt header in burst' + str(burst_count) + 'for file' + self.header.fn
            self.bnum = burst_count
            raise TypeError('Burst Read Failed.')

        # Move the burst pointer past this burst's samples
        if burst_count < burst and burst_pointer <= file_len - max_header_len:
            if self.average != 0:
                # averaged data stored as 4-byte values
                burst_pointer += self.cnum*self.snum*4
            else:
                # raw data stored as 2-byte values
                burst_pointer += self.cnum*self.snum*2
        burst_count += 1

    # --- Get remaining information from burst header --- #
    # Look for a few different strings and save output
    strings = ['Time stamp=', 'Temp1=', 'Temp2=', 'BatteryVoltage=']
    output = []
    for i, string in enumerate(strings):
        if string in self.header.header_string:
            # NOTE(review): uses 're' — confirm it is imported at module level
            search_start = [m.start() for m in re.finditer(string, self.header.header_string)]
            search_end = [self.header.header_string[ind:].find('\\') for ind in search_start]
            # renamed inner index to avoid shadowing the enumerate variable
            out = [self.header.header_string[search_start[j] + len(string):search_end[j] + search_start[j]]
                   for j in range(len(search_start))]
            output.append(out)

    if 'Time stamp' not in self.header.header_string:
        # NOTE(review): self.bnum may not be set yet on this path — confirm
        self.flags.file_read_code = 'Burst' + str(self.bnum) + 'not found in file' + self.header.fn
    else:
        self.time_stamp = np.array([datetime.datetime.strptime(str_time, '%Y-%m-%d %H:%M:%S')
                                    for str_time in output[0]])
        timezero = datetime.datetime(1, 1, 1, 0, 0, 0)
        day_offset = self.time_stamp - timezero
        self.decday = np.array([offset.days for offset in day_offset]) + 377.  # Matlab compatible
        self.temperature1 = np.array(output[1]).astype(float)
        self.temperature2 = np.array(output[2]).astype(float)
        self.battery_voltage = np.array(output[3]).astype(float)

    # --- Read in the actual data --- #
    # Go to the end of the header
    end_byte = b'*** End Header ***'
    data_ind = fid.read(max_header_len).rfind(end_byte) + len(end_byte)
    fid.seek(data_ind)
    # Only if all the bursts were read
    if burst_count != burst+1:
        # too few bursts in file
        self.flags.file_read_code = 'Burst' + str(self.bnum) + 'not found in file' + self.header.fn
        self.bnum = burst_count - 1
        raise TypeError('Burst ' + str(self.bnum) + ' not found in file ' + self.header.fn)
    else:
        # TODO: Check the other readers for average == 1 or average == 2
        if self.average == 2:
            self.data = np.fromfile(fid, dtype='uint32', count=self.cnum*self.snum)
        elif self.average == 1:
            fid.seek(burst_pointer+1)
            # BUGFIX: 'float4' is not a valid numpy dtype and always raised;
            # the intended 4-byte float is 'float32'.
            self.data = np.fromfile(fid, dtype='float32', count=self.cnum*self.snum)
        else:
            self.data = np.fromfile(fid, dtype='uint16', count=self.cnum*self.snum)
        if fid.tell()-(burst_pointer-1) < self.cnum*self.snum:
            self.flags.file_read_code = 'Corrupt header in burst' + str(burst_count) + 'for file' + self.header.fn
        # Wrap negative values (only possible for the float case; the
        # unsigned dtypes can never be negative) and scale to volts.
        self.data[self.data < 0] = self.data[self.data < 0] + 2**16.
        self.data = self.data.astype(float) * 2.5/2**16.
        if self.average == 2:
            self.data /= (self.n_subbursts*self.header.n_attenuators)
        # Sample index bounds for each chirp within the flat data vector
        start_ind = np.transpose(np.arange(0, self.snum*self.cnum, self.snum))
        end_ind = start_ind + self.snum
        self.bnum = burst
    fid.close()

    # Clean temperature record (wrong data type?)
    self.temperature1[self.temperature1 > 300] -= 512
    self.temperature2[self.temperature2 > 300] -= 512
    self.flags.file_read_code = 'Successful Read'
    return start_ind, end_ind
+1
-1
Metadata-Version: 1.0
Name: impdar
Version: 1.1.4.post3
Version: 1.1.4.post5
Summary: Scripts for impulse radar

@@ -5,0 +5,0 @@ Home-page: http://github.com/dlilien/impdar

@@ -35,2 +35,8 @@ README.md

impdar/lib/process.py
impdar/lib/ApresData/ApresFlags.py
impdar/lib/ApresData/ApresHeader.py
impdar/lib/ApresData/_ApresDataProcessing.py
impdar/lib/ApresData/_ApresDataSaving.py
impdar/lib/ApresData/__init__.py
impdar/lib/ApresData/load_apres.py
impdar/lib/RadarData/_RadarDataFiltering.py

@@ -37,0 +43,0 @@ impdar/lib/RadarData/_RadarDataProcessing.py

@@ -364,1 +364,43 @@ #! /usr/bin/env python

return mat
def crop(self, ind):
    """Crop the picks.

    This is just subtraction with some bounds checks.
    It is easy since we assume that the work to convert to indices has
    already been done. Not needed for bottom crops.

    Parameters
    ----------
    ind: int or ndarray(tnum, ):
        How much we need to shift by
    """
    for name in ('samp1', 'samp2', 'samp3'):
        arr = getattr(self, name, None)
        if arr is None:
            continue
        # Remember which entries were already missing so the shift
        # cannot accidentally resurrect them.
        missing = np.isnan(arr)
        arr -= ind
        arr[missing] = np.nan
        # Picks shifted outside the data window become missing too.
        arr[arr < 0] = np.nan
        arr[arr >= self.radardata.snum] = np.nan
        setattr(self, name, arr)
def restack(self, traces):
    """Restack the picks to stay aligned with horizontally stacked data.

    Each pick array is averaged in blocks of `traces` columns; sample
    indices are kept as integers while time and power stay floats.

    Parameters
    ----------
    traces: int
        Number of traces averaged into each output trace.
    """
    specs = (('samp1', int), ('samp2', int), ('samp3', int),
             ('time', float), ('power', float))
    for name, cast in specs:
        arr = getattr(self, name, None)
        if arr is None:
            continue
        n_out = int(np.floor(arr.shape[1] / traces))
        stacked = np.full((arr.shape[0], n_out), np.nan)
        for col in range(n_out):
            lo = col * traces
            hi = min(lo + traces, arr.shape[1])
            # It is not totally clear if this should be a mean or nanmean.
            # NOTE(review): casting a NaN mean with astype(int) yields a
            # platform-dependent garbage value that the < 0 filter below
            # is relied on to discard — confirm on all targets.
            stacked[:, col] = np.nanmean(arr[:, lo:hi], axis=1).astype(cast)
        stacked[stacked < 0] = np.nan
        stacked[stacked >= self.radardata.snum] = np.nan
        setattr(self, name, stacked)

@@ -21,2 +21,3 @@ #! /usr/bin/env python

def plot(fns, tr=None, s=False, ftype='png', dpi=300, xd=False, yd=False,

@@ -197,3 +198,3 @@ dualy=False, x_range=(0, -1), power=None, spectra=None,

if hasattr(dat.flags, 'elev') and dat.flags.elev:
yd = dat.elevation[y_range[0]:y_range[-1]]
yd = dat.elevation
ax.set_ylabel('Elevation (m)')

@@ -207,9 +208,9 @@ else:

y_range = (max(y_range[0], np.min(np.where(~np.isnan(dat.travel_time))[0])), y_range[1])
yd = dat.travel_time[y_range[0]:y_range[-1]]
yd = dat.travel_time
ax.set_ylabel('Two way travel time (usec)')
elif ydat == 'depth':
if dat.nmo_depth is not None:
yd = dat.nmo_depth[y_range[0]:y_range[-1]]
yd = dat.nmo_depth
else:
yd = dat.travel_time[y_range[0]:y_range[-1]] / 2.0 * (
yd = dat.travel_time / 2.0 * (
1.69e8 * 1.0e-6)

@@ -221,12 +222,12 @@ ax.set_ylabel('Depth (m)')

yd = dat.travel_time[y_range[0]:y_range[-1]]
yd = dat.travel_time
ax.set_ylabel('Two way travel time (usec)')
ax2 = ax.twinx()
if dat.nmo_depth is not None:
yd2 = dat.nmo_depth[y_range[0]:y_range[-1]]
yd2 = dat.nmo_depth
else:
yd2 = dat.travel_time[y_range[0]:y_range[-1]] / 2.0 * (
yd2 = dat.travel_time / 2.0 * (
1.69e8 * 1.0e-6)
ax2.set_ylabel('Approximate depth (m)')
ax2.set_ylim(yd2[-1], yd2[0])
ax2.set_ylim(yd2[y_range[-1] - 1], yd2[y_range[0]])
else:

@@ -237,6 +238,6 @@ raise ValueError('Unrecognized ydat, choices are elev, twtt, \

if xdat == 'tnum':
xd = np.arange(int(dat.tnum))[x_range[0]:x_range[-1]]
xd = np.arange(int(dat.tnum))
ax.set_xlabel('Trace number')
elif xdat == 'dist':
xd = dat.dist[x_range[0]:x_range[-1]]
xd = dat.dist
ax.set_xlabel('Distance (km)')

@@ -263,3 +264,3 @@

vmax=clims[1],
extent=[np.min(xd), np.max(xd), np.max(yd), np.min(yd)],
extent=[np.min(xd[x_range[0]:x_range[-1]]), np.max(xd[x_range[0]:x_range[-1]]), np.max(yd[y_range[0]:y_range[-1]]), np.min(yd[y_range[0]:y_range[-1]])],
aspect='auto')

@@ -272,3 +273,3 @@ elif hasattr(dat.flags, 'elev') and dat.flags.elev:

vmax=clims[1],
extent=[np.min(xd), np.max(xd), np.min(yd), np.max(yd)],
extent=[np.min(xd[x_range[0]:x_range[-1]]), np.max(xd[x_range[0]:x_range[-1]]), np.min(yd[y_range[0]:y_range[-1]]), np.max(yd[y_range[0]:y_range[-1]])],
aspect='auto')

@@ -281,7 +282,7 @@ else:

vmax=clims[1],
extent=[np.min(xd), np.max(xd), np.max(yd), np.min(yd)],
extent=[np.min(xd[x_range[0]:x_range[-1]]), np.max(xd[x_range[0]:x_range[-1]]), np.max(yd[y_range[0]:y_range[-1]]), np.min(yd[y_range[0]:y_range[-1]])],
aspect='auto')
if (pick_colors is not None) and pick_colors:
plot_picks(dat, xd, yd, fig=fig, ax=ax, colors=pick_colors, flatten_layer=flatten_layer, just_middle=middle_picks_only)
plot_picks(dat, xd, yd, fig=fig, ax=ax, colors=pick_colors, flatten_layer=flatten_layer, just_middle=middle_picks_only, x_range=x_range)
if not return_plotinfo:

@@ -543,3 +544,3 @@ return fig, ax

def plot_picks(rd, xd, yd, colors=None, flatten_layer=None, fig=None, ax=None, just_middle=False, picknums=None, **plotting_kwargs):
def plot_picks(rd, xd, yd, colors=None, flatten_layer=None, fig=None, ax=None, just_middle=False, picknums=None, x_range=None, **plotting_kwargs):
"""Update the plotting of the current pick.

@@ -559,2 +560,7 @@

"""
if x_range is None:
x_range = (0, -1)
if x_range[-1] == -1:
x_range = (x_range[0], rd.tnum)
if ax is None:

@@ -613,9 +619,11 @@ if fig is not None:

t[:] = np.nan
comb_mask = np.logical_or(mask, np.isnan(rd.picks.samp1[i, :]))
t[~comb_mask] = yd[(rd.picks.samp1[i, :] + offset)[~comb_mask].astype(int)]
b = np.zeros(xd.shape)
b[:] = np.nan
comb_mask = np.logical_or(mask, np.isnan(rd.picks.samp3[i, :]))
b[~comb_mask] = yd[(rd.picks.samp3[i, :] + offset)[~comb_mask].astype(int)]
ax.plot(xd, c, color=cl[1], **plotting_kwargs)
ax.plot(xd, t, color=cl[0], **plotting_kwargs)
ax.plot(xd, b, color=cl[2], **plotting_kwargs)
ax.plot(xd[x_range[0]:x_range[1]], c[x_range[0]:x_range[1]], color=cl[1], **plotting_kwargs)
ax.plot(xd[x_range[0]:x_range[1]], t[x_range[0]:x_range[1]], color=cl[0], **plotting_kwargs)
ax.plot(xd[x_range[0]:x_range[1]], b[x_range[0]:x_range[1]], color=cl[2], **plotting_kwargs)
return fig, ax

@@ -622,0 +630,0 @@

@@ -13,2 +13,3 @@ #! /usr/bin/env python

from scipy.signal import filtfilt, butter, tukey, cheby1, bessel, firwin, lfilter, wiener
from scipy.ndimage import median_filter
from .. import migrationlib

@@ -553,3 +554,3 @@ from ..ImpdarError import ImpdarError

For now this just uses the scipy wiener filter,
For now this just uses the scipy wiener or median filter,
We could experiment with other options though

@@ -564,3 +565,3 @@

noise: float; optional
power of noise reduction, default is the average of the local variance of the image
power of noise reduction for wiener, default is the average of the local variance
ftype: string; optional

@@ -583,2 +584,4 @@ filter type

self.data = wiener(self.data, mysize=(vert_win, hor_win), noise=noise)
elif ftype == 'median':
self.data = median_filter(self.data, size=(vert_win, hor_win))
else:

@@ -585,0 +588,0 @@ raise ValueError('Only the wiener filter has been implemented for denoising.')

@@ -19,2 +19,3 @@ #! /usr/bin/env python

def reverse(self):

@@ -142,3 +143,3 @@ """Reverse radar data

d = minimize(optimize_moveout_depth,.5*t*uice,args=(t,ant_sep,d_interp,u_interp),
tol=1e-8,bounds=((0,0.5*t*uair),))['x'][0]
tol=1e-8,bounds=((0, 0.5*t*uair),))['x'][0]
u_rms = np.sqrt(np.mean(u_interp[d_interp<d]**2.))

@@ -305,2 +306,3 @@ # get the upper leg of the trave_path triangle (direct arrival) from the antenna separation and the rms velocity

self.snum = self.data.shape[0]
mintrig = 0
else:

@@ -324,2 +326,6 @@ # pretrig, vector input

if top_or_bottom == 'top':
if self.picks is not None:
self.picks.crop(ind)
try:

@@ -444,2 +450,6 @@ self.flags.crop[0] = 1

self.trace_int = trace_int
if hasattr(self, 'picks') and self.picks is not None:
self.picks.restack(traces)
for var, val in oned_newdata.items():

@@ -559,4 +569,7 @@ setattr(self, var, val)

if self.picks is not None:
for attr in ['samp1', 'samp2', 'samp3', 'power', 'time']:
for attr in ['samp1', 'samp2', 'samp3']:
if getattr(self.picks, attr) is not None:
setattr(self.picks, attr, np.round(interp1d(temp_dist, getattr(self.picks, attr)[:, good_vals])(new_dists)))
for attr in ['power', 'time']:
if getattr(self.picks, attr) is not None:
setattr(self.picks, attr, interp1d(temp_dist, getattr(self.picks, attr)[:, good_vals])(new_dists))

@@ -614,8 +627,12 @@

top_inds = (elev_diffs / dz_avg).astype(int)
for i in range(self.data.shape[1]):
left_ind = int(elev_diffs[i] // dz_avg)
self.data[left_ind: left_ind + data_old.shape[0], i] = data_old[:, i]
top_ind = top_inds[i]
self.data[top_ind: top_ind + data_old.shape[0], i] = data_old[:, i]
if hasattr(self, 'picks') and self.picks is not None:
self.picks.crop(-top_inds)
self.elevation = np.hstack((np.arange(np.max(self.elev), np.min(self.elev), -dz_avg),
np.min(self.elev) - self.nmo_depth))
self.flags.elev = 1
Metadata-Version: 1.0
Name: impdar
Version: 1.1.4.post3
Version: 1.1.4.post5
Summary: Scripts for impulse radar

@@ -5,0 +5,0 @@ Home-page: http://github.com/dlilien/impdar

# ImpDAR: an impulse radar processor
[![DOI](https://zenodo.org/badge/134008583.svg)](https://zenodo.org/badge/latestdoi/134008583) [![Tests](https://github.com/dlilien/ImpDAR/actions/workflows/python-package.yml/badge.svg)](https://github.com/dlilien/ImpDAR/actions/workflows/python-package.yml) [![codecov](https://codecov.io/gh/dlilien/ImpDAR/branch/master/graph/badge.svg?token=R51QB61XNP)](https://codecov.io/gh/dlilien/ImpDAR)
[![DOI](https://zenodo.org/badge/134008583.svg)](https://zenodo.org/badge/latestdoi/134008583) [![Tests](https://github.com/dlilien/ImpDAR/actions/workflows/python-package.yml/badge.svg)](https://github.com/dlilien/ImpDAR/actions/workflows/python-package.yml) [![codecov](https://codecov.io/gh/dlilien/ImpDAR/branch/main/graph/badge.svg?token=R51QB61XNP)](https://codecov.io/gh/dlilien/ImpDAR)

@@ -5,0 +5,0 @@ ImpDAR is a suite of processing and interpretation tools for impulse radar (targeted for ice-penetrating radar applications but usable for ground-penetrating radar as well). The core processing steps and general terminology come from the St. Olaf Deep Radar processor, but everything is re-written in python and significant improvements to speed, readability, documentation, and interface have been made across the board. However, this code has a lot of history of contributors--acknowledgment of many of them are preserved in the file headers.

@@ -42,3 +42,3 @@ #! /usr/bin/env python

version = '1.1.4.post3'
version = '1.1.4.post5'
packages = ['impdar',

@@ -51,2 +51,3 @@ 'impdar.lib',

'impdar.lib.RadarData',
'impdar.lib.ApresData',
'impdar.lib.migrationlib']

@@ -53,0 +54,0 @@

Sorry, the diff of this file is too big to display