You're Invited: Meet the Socket Team at RSAC and BSidesSF 2026, March 23–26. RSVP
Socket
Book a DemoSign in
Socket

impdar

Package Overview
Dependencies
Maintainers
1
Versions
26
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

impdar - pypi Package Compare versions

Comparing version
1.1.4.post5
to
1.1.5
+104
impdar/bin/apdar.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2021 David Lilien <david.lilien@umanitoba.ca>
#
# Distributed under terms of the GNU GPL3.0 license.
"""
Make an executable for single actions of ApRES handling.
"""
import sys
import os.path
import argparse
from impdar.lib.ApresData import load_apres
def _get_args():
    """Build and return the argument parser for the apdar executable.

    Currently only the 'load' subcommand is registered; its processing
    function is the identity, since loading itself is handled in main().
    """
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(help='Choose a processing step')
    load_parser = _add_procparser(subparsers,
                                  'load',
                                  'load apres data',
                                  lambda x: x,
                                  defname='load')
    _add_def_args(load_parser)
    return parser
def _add_simple_procparser(subparsers, name, helpstr, func, defname='proc'):
    """Add a sub parser for a processing step that needs no extra arguments.

    Convenience wrapper: registers the subcommand via _add_procparser and
    attaches the standard input/output arguments.
    """
    simple_parser = _add_procparser(subparsers, name, helpstr, func,
                                    defname=defname)
    _add_def_args(simple_parser)
    return simple_parser
def _add_procparser(subparsers, name, helpstr, func, defname='proc'):
"""Wrap adding subparser because we mostly want the same args."""
parser = subparsers.add_parser(name, help=helpstr)
parser.set_defaults(func=func, name=defname)
return parser
def _add_def_args(parser):
"""Set some default arguments common to the different processing types."""
parser.add_argument('fns',
type=str,
nargs='+',
help='The files to process')
parser.add_argument('-o',
type=str,
help='Output to this file (folder if multiple inputs)')
def main():
    """Get arguments, load and process data, and handle saving.

    Output naming: the processing-step name is appended to each input's
    basename ('raw' for a bare load).  With ``-o`` naming a directory
    (multiple inputs, or a trailing '/'), one file per input is written
    there; otherwise ``-o`` is the single output filename.  Without
    ``-o``, outputs are written next to the inputs.
    """
    parser = _get_args()
    args = parser.parse_args(sys.argv[1:])
    if not hasattr(args, 'func'):
        # No subcommand was chosen; print help (argparse exits here).
        parser.parse_args(['-h'])

    apres_data = [load_apres.load_apres_single_file(fn) for fn in args.fns]

    # 'load' is a no-op on the data; every other step applies its function.
    if args.name != 'load':
        for dat in apres_data:
            args.func(dat, **vars(args))

    # Bare loads are tagged 'raw' in the output filename.
    name = 'raw' if args.name == 'load' else args.name

    if args.o is not None:
        if (len(apres_data) > 1) or args.o.endswith('/'):
            # -o is a directory: one output per input, named from the input.
            for d, f in zip(apres_data, args.fns):
                bn = os.path.split(os.path.splitext(f)[0])[1]
                if bn.endswith('_raw'):
                    # Avoid stacking suffixes when re-processing raw files.
                    bn = bn[:-4]
                out_fn = os.path.join(args.o, bn + '_{:s}.h5'.format(name))
                d.save(out_fn)
        else:
            # Single input with an explicit output filename.
            apres_data[0].save(args.o)
    else:
        # No -o: write next to each input with the step name appended.
        for d, f in zip(apres_data, args.fns):
            bn = os.path.splitext(f)[0]
            if bn.endswith('_raw'):
                bn = bn[:-4]
            out_fn = bn + '_{:s}.h5'.format(name)
            d.save(out_fn)
def crop(dat, lim=0, top_or_bottom='top', dimension='snum', **kwargs):
    """Crop the data in the vertical direction.

    Forwards to ``dat.crop``; any extra keyword arguments (e.g. leftovers
    from the argparse namespace) are silently discarded.
    """
    crop_kwargs = {'top_or_bottom': top_or_bottom, 'dimension': dimension}
    dat.crop(lim, **crop_kwargs)
# Run the command-line interface when this module is executed as a script.
if __name__ == '__main__':
    main()
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
"""
Load TEK files (e.g. 01-02 UW Kamb data) and convert to the .mat ImpDAR format
Author:
Benjamin Hills
bhills@uw.edu
University of Washington
Earth and Space Sciences
December 9 2020
"""
import numpy as np
from ..RadarData import RadarData
from ..RadarFlags import RadarFlags
def fread(fid, nelements, dtype):
    """Read ``nelements`` items of ``dtype`` from an open binary file.

    Equivalent to the Matlab fread function.

    Parameters
    ----------
    fid: file object
        Open binary file, positioned at the data to read.
    nelements: int
        Number of items to read.
    dtype: type
        Numpy dtype of the items.  Pass the builtin ``str`` to read
        8-bit characters (returned as uint8 codes).

    Returns
    -------
    numpy.ndarray
        Column vector of shape (nelements, 1).
    """
    # np.str was merely an alias of the builtin str and has been removed
    # from modern numpy (>=1.24); compare against the builtin so this
    # works on both old and new numpy versions.
    if dtype is str:
        dt = np.uint8  # WARNING: assuming 8-bit ASCII for str!
    else:
        dt = dtype
    data_array = np.fromfile(fid, dt, nelements)
    data_array.shape = (nelements, 1)
    return data_array
def load_tek(fn_tek, magnets_per_wheel=1, wheel_diameter=0.5, trigger_level=0.1, trigger_sample=None,
             channel=1, *args, **kwargs):
    """Load a TEK file into ImpDAR

    Read file following http://dx.doi.org/10.7265/N5736NTS

    Parameters
    ----------
    fn_tek: str
        Filename of the binary TEK file to read.
    magnets_per_wheel: int, optional
        Magnets per odometer-wheel revolution, used to convert counts to
        distance.
    wheel_diameter: float, optional
        Odometer wheel diameter (presumably meters -- TODO confirm units).
    trigger_level: float, optional
        Fraction of the maximum |averaged trace| that the trace gradient
        must exceed for automatic trigger detection.
    trigger_sample: int, optional
        Trigger sample index; auto-detected from the data when None.
    channel: int, optional
        Channel number recorded on the returned object.

    Returns
    -------
    RadarData
        Object populated from the file and validated with check_attrs().
    """
    # Set up an empty RadarData container to fill
    tek_data = RadarData(None)
    tek_data.fn = fn_tek
    # Initialize empty arrays; one value is appended per record read below
    tek_data.decday = np.array([]).astype(np.float32)
    wheel_count = np.array([]).astype(np.ushort)
    tek_data.pressure = np.array([]).astype(np.short)
    yinc = np.array([]).astype(np.float32)
    xinc = np.array([]).astype(np.float32)
    averages = np.array([]).astype(np.ushort)
    length = np.array([]).astype(np.ushort)
    tek_data.data = np.empty((0,1000)).astype(np.ushort)
    # Open file to read in the data
    fid = open(fn_tek,'rb')
    more_lines=True
    # Each record is a fixed set of header fields followed by a trace of
    # `length` samples; keep reading records until a read fails (EOF).
    while more_lines:
        try:
            tek_data.decday = np.append(tek_data.decday,fread(fid, 1, np.float32))
            wheel_count = np.append(wheel_count,fread(fid, 1, np.ushort))
            tek_data.pressure = np.append(tek_data.pressure,fread(fid, 1, np.short))
            yinc = np.append(yinc,fread(fid, 1, np.float32))
            xinc = np.append(xinc,fread(fid, 1, np.float32))
            averages = np.append(averages,fread(fid, 1, np.ushort))
            length = np.append(length,fread(fid, 1, np.ushort))
            tek_data.data = np.append(tek_data.data,
                                      np.transpose(fread(fid, length[-1], np.ushort)),axis=0)
        # NOTE(review): bare except treats ANY failure (not just EOF) as
        # end-of-file, which can mask real read errors -- consider narrowing.
        except:
            more_lines=False
    fid.close()
    # Transpose and normalize data around zero.  The in-place dtype
    # assignment reinterprets the uint16 buffer as int16 (same item size);
    # presumably 10-bit ADC values centered at 512 -- TODO confirm.
    tek_data.data = np.transpose(tek_data.data)
    tek_data.data.dtype = np.short
    tek_data.data -= 512
    tek_data.snum = tek_data.data.shape[0]
    tek_data.tnum = tek_data.data.shape[1]
    tek_data.trace_num = np.arange(tek_data.tnum)
    # Distance array from wheel count: wheel circumference per magnet pass
    tek_data.dist = wheel_count.astype(np.float32)
    tek_data.dist *= np.pi*wheel_diameter/magnets_per_wheel
    tek_data.trace_int = np.gradient(tek_data.dist)
    # Time step (median of the per-record sample intervals)
    tek_data.dt = np.median(xinc)
    # Trigger sample
    tek_data.trig_level = trigger_level
    if trigger_sample is None:
        # Auto-detect: first sample where |gradient of the averaged trace|
        # exceeds trig_level * max(|averaged trace|).  The `x[1] > 0.7`
        # comparison is just a truthiness test on the boolean mask entry.
        avg_trace = np.mean(tek_data.data,axis=1)
        exceeds_level = np.abs(np.gradient(avg_trace))>tek_data.trig_level*np.max(np.abs(avg_trace))
        trigger_sample = next(x[0] for x in enumerate(exceeds_level) if x[1] > 0.7)
    tek_data.trig = trigger_sample*np.ones(tek_data.tnum)
    # Two-way travel time in microseconds, zeroed at the trigger sample
    tek_data.travel_time = (-trigger_sample+np.arange(tek_data.snum))*tek_data.dt*1e6
    # Convert pressure to an array of differences from the first reading
    tek_data.pressure -= tek_data.pressure[0]
    # Get variables that are already in the St Olaf .mat file
    tek_data.chan = channel
    # Initialize the flags
    tek_data.flags = RadarFlags()
    # Check attributes in the RadarData class and return
    tek_data.check_attrs()
    return tek_data
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
"""
Load an output file processed from a University of Alabama VHF, UWB, or UHF radar.
Data are all processed, but some more complex data views are not available (and some simple things,
like constant trace spacing, are not done).
This allows easy colorbar adjustment, interpolation to geospatial coordinates, and layer picking.
"""
import numpy as np
from scipy.interpolate import interp1d
from ..RadarData import RadarData, RadarFlags
from ..gpslib import nmea_info
try:
import h5py
H5 = True
except ImportError:
H5 = False
def load_UoA_mat(fn_mat, gps_offset=0.0):
    """Load data processed by the UoA RSC Matlab processor

    Parameters
    ----------
    fn_mat: str
        The filename to load (HDF5-based Matlab v7.3 file).
    gps_offset: float, optional
        Offset, in seconds, added to the GPS POSIX times before use.

    Returns
    -------
    RadarData
        The dB-scaled radargram with GPS info interpolated onto traces.
    """
    UoA_data = RadarData(None)
    UoA_data.fn = fn_mat
    with h5py.File(fn_mat, 'r') as fin:
        UoA_data.data = fin['Data']['channel'][:, :].T
        # Complex data arrives as a compound dtype with 'real'/'imag'
        # fields; convert the magnitude to dB in either case.
        if len(UoA_data.data.dtype) == 2:
            UoA_data.data = 10 * np.log10(np.sqrt(UoA_data.data['real'] ** 2.0 + UoA_data.data['imag'] ** 2.0))
        else:
            UoA_data.data = 10 * np.log10(UoA_data.data)
        UoA_data.snum, UoA_data.tnum = int(UoA_data.data.shape[0]), int(UoA_data.data.shape[1])
        UoA_data.trace_num = np.arange(UoA_data.tnum) + 1
        # Fast time is stored in seconds; ImpDAR uses microseconds
        UoA_data.travel_time = fin['Data']['fast_time'][:].flatten() * 1.0e6
        UoA_data.dt = np.mean(np.diff(UoA_data.travel_time)) * 1.0e-6
        nminfo = nmea_info()
        nminfo.time = (fin['INS_GPS']['POSIX_time'][:].flatten() + gps_offset) / (24. * 60. * 60.)  # Stored in seconds
        nminfo.ppstime = fin['INS_GPS']['POSIX_time'][:].flatten() + gps_offset
        nminfo.lat = fin['INS_GPS']['latitude'][:].flatten()
        nminfo.lon = fin['INS_GPS']['longitude'][:].flatten()
        nminfo.elev = fin['INS_GPS']['altitude_MSL'][:].flatten()
        # The INS/GPS stream has its own sampling; interpolate each field
        # onto the radar traces' POSIX times (extrapolating at the ends).
        UoA_data.lat = interp1d(nminfo.ppstime, nminfo.lat, fill_value='extrapolate')(fin['Data']['POSIX_time'][:].flatten())
        UoA_data.long = interp1d(nminfo.ppstime, nminfo.lon, fill_value='extrapolate')(fin['Data']['POSIX_time'][:].flatten())
        UoA_data.elev = interp1d(nminfo.ppstime, nminfo.elev, fill_value='extrapolate')(fin['Data']['POSIX_time'][:].flatten())
        UoA_data.decday = interp1d(nminfo.ppstime, nminfo.time, fill_value='extrapolate')(fin['Data']['POSIX_time'][:].flatten())
    try:
        # Projection needs optional dependencies; skip if unavailable
        UoA_data.get_projected_coords()
    except ImportError:
        pass
    # NOTE(review): assumes at least two traces are present
    UoA_data.trace_int = UoA_data.decday[1] - UoA_data.decday[0]
    UoA_data.pressure = np.zeros_like(UoA_data.decday)
    UoA_data.trig = np.zeros_like(UoA_data.decday).astype(int)
    UoA_data.trig_level = 0.
    UoA_data.flags = RadarFlags()
    UoA_data.flags.power = False
    # Channel number is parsed out of the filename
    if fn_mat[-10:] == '_files.mat':
        UoA_data.chan = 999
    else:
        if 'hannel' in fn_mat:
            # Matches 'Channel'/'channel'; the digit follows the word
            idx = fn_mat.index('hannel')
            UoA_data.chan = int(fn_mat[idx + 6])
        elif 'Ch' in fn_mat:
            idx = fn_mat.index('Ch')
            UoA_data.chan = int(fn_mat[idx + 2])
        else:
            UoA_data.chan = 10  # fallback sentinel -- TODO confirm meaning
    UoA_data.check_attrs()
    return UoA_data
def load_UoA_h5(fn, gps_offset=0.0):
    """Load University of Alabama multichannel radar data from an HDF5 file.

    (The previous docstring incorrectly described CReSIS MCoRDS .mat data.)
    Returns one RadarData per (product, integrator) pair: either every
    integrator under 'processed', or, failing that, every integrator under
    each of channels 0-7.

    Parameters
    ----------
    fn: str
        The filename to load (.h5).
    gps_offset: float, optional
        Offset, in seconds, added to GPS times (passed to the group loader).

    Returns
    -------
    list of RadarData
        The loaded datasets.

    Raises
    ------
    ValueError
        If the file's 'Type' attribute is not 'MultiChannel'.
    """
    data_list = []
    with h5py.File(fn, 'r') as fin:
        if fin.attrs['Type'] != 'MultiChannel':
            raise ValueError('Can only unpack MultiChannel UoA data')
        if 'processed' in fin:
            for name in fin['processed'].keys():
                for integrator in fin['processed'][name].keys():
                    grp = fin['processed'][name][integrator]
                    UoA_data = RadarData(None)
                    # Tag the synthesized filename with product + integrator
                    UoA_data.fn = fn[:-3] + name + '_Int' + integrator[-1]
                    UoA_data.chan = 999  # sentinel for processed/combined data
                    _load_group(UoA_data, grp, gps_offset)
                    data_list.append(UoA_data)
        else:
            print('No processed data found, reading channels')
            for i in range(8):
                if f'channel_{i}' in fin:
                    for integrator in fin[f'channel_{i}'].keys():
                        grp = fin[f'channel_{i}'][integrator]
                        UoA_data = RadarData(None)
                        UoA_data.fn = fn[:-3] + f'_ch{i}_Int' + integrator[-1]
                        UoA_data.chan = i
                        _load_group(UoA_data, grp, gps_offset)
                        data_list.append(UoA_data)
    return data_list
def _load_group(UoA_data, grp, gps_offset):
    """Populate *UoA_data* from one HDF5 group of UoA radar data.

    Parameters
    ----------
    UoA_data: RadarData
        Partially initialized object (fn and chan already set by caller).
    grp: h5py.Group
        Group holding 'Chirps' and '_time', and optionally 'datetime',
        'lat', 'lon', 'x', 'y'.
    gps_offset: float
        Offset, in seconds, added to the GPS times.
    """
    UoA_data.data = grp['Chirps'][()]
    # Complex data: either a compound real/imag dtype or a native complex
    # dtype; convert the magnitude to dB in either case.
    if (len(UoA_data.data.dtype) == 2) or (UoA_data.data.dtype in [np.complex64, np.complex128]):
        UoA_data.data = 10.0 * np.log10(np.sqrt(np.real(UoA_data.data) ** 2.0 + np.imag(UoA_data.data) ** 2.0))
    else:
        UoA_data.data = 10.0 * np.log10(UoA_data.data)
    UoA_data.snum, UoA_data.tnum = int(UoA_data.data.shape[0]), int(UoA_data.data.shape[1])
    UoA_data.trace_num = np.arange(UoA_data.tnum) + 1
    # '_time' is fast time in seconds; ImpDAR stores microseconds
    UoA_data.travel_time = grp['_time'][()] * 1.0e6
    UoA_data.dt = np.mean(np.diff(UoA_data.travel_time)) * 1.0e-6
    if 'datetime' in grp:
        nminfo = nmea_info()
        # Convert datetime64[ms] timestamps to POSIX seconds
        dt = grp['datetime'][()].astype('datetime64[ms]').astype(int) / 1000.0
        nminfo.time = (dt + gps_offset) / (24. * 60. * 60.)  # Stored in seconds
        nminfo.ppstime = dt + gps_offset
        nminfo.lat = grp['lat'][:].flatten()
        nminfo.lon = grp['lon'][:].flatten()
        nminfo.elev = np.zeros_like(nminfo.lat)
        # Interpolate the GPS fields onto the trace times
        UoA_data.lat = interp1d(nminfo.ppstime, nminfo.lat, fill_value='extrapolate')(dt)
        UoA_data.long = interp1d(nminfo.ppstime, nminfo.lon, fill_value='extrapolate')(dt)
        # Elevation is not stored in these files; mark as unknown
        UoA_data.elev = np.zeros_like(UoA_data.lat)
        UoA_data.elev[:] = np.nan
        UoA_data.decday = interp1d(nminfo.ppstime, nminfo.time, fill_value='extrapolate')(dt)
        if 'x' in grp:
            # Pre-projected coordinates are available; use them directly
            UoA_data.x_coord = grp['x'][()]
            UoA_data.y_coord = grp['y'][()]
        else:
            try:
                # Projection needs optional dependencies; skip if unavailable
                UoA_data.get_projected_coords()
            except ImportError:
                pass
    else:
        print('WARNING: datetime information missing--hopefully this is loopback data???')
        # No GPS: fill position fields with NaN placeholders
        UoA_data.lat = np.zeros((UoA_data.tnum,)) * np.nan
        UoA_data.long = np.zeros((UoA_data.tnum,)) * np.nan
        UoA_data.elev = np.zeros((UoA_data.tnum,)) * np.nan
        UoA_data.decday = np.zeros((UoA_data.tnum,))
    # NOTE(review): assumes at least two traces are present
    UoA_data.trace_int = UoA_data.decday[1] - UoA_data.decday[0]
    UoA_data.pressure = np.zeros_like(UoA_data.decday)
    UoA_data.trig = np.zeros_like(UoA_data.decday).astype(int)
    UoA_data.trig_level = 0.
    UoA_data.flags = RadarFlags()
    UoA_data.flags.power = False
    UoA_data.check_attrs()
+1
-0
[console_scripts]
apdar = impdar.bin.apdar:main
impdar = impdar.bin.impdarexec:main

@@ -3,0 +4,0 @@ imppick = impdar.bin.imppick:main

@@ -1,4 +0,4 @@

Metadata-Version: 1.0
Metadata-Version: 2.1
Name: impdar
Version: 1.1.4.post5
Version: 1.1.5
Summary: Scripts for impulse radar

@@ -9,3 +9,48 @@ Home-page: http://github.com/dlilien/impdar

License: GNU GPL-3.0
Description: UNKNOWN
Description: # ImpDAR: an impulse radar processor
[![DOI](https://zenodo.org/badge/134008583.svg)](https://zenodo.org/badge/latestdoi/134008583) [![Tests](https://github.com/dlilien/ImpDAR/actions/workflows/python-package.yml/badge.svg)](https://github.com/dlilien/ImpDAR/actions/workflows/python-package.yml) [![codecov](https://codecov.io/gh/dlilien/ImpDAR/branch/main/graph/badge.svg?token=R51QB61XNP)](https://codecov.io/gh/dlilien/ImpDAR)
ImpDAR is a suite of processing and interpretation tools for impulse radar (targeted for ice-penetrating radar applications but usable for ground-penetrating radar as well). The core processing steps and general terminology come from the St. Olaf Deep Radar processor, but everything is re-written in python and significant improvements to speed, readability, documentation, and interface have been made across the board. However, this code has a lot of history of contributors--acknowledgments of many of them are preserved in the file headers.
ImpDAR is intended to be more flexible than other available options. Support is gradually being added for a variety of file formats. Currently, [GSSI](http://www.geophysical.com), [PulseEKKO](http://www.sensoft.ca), [Ramac](http://www.malagpr.com), [Blue Systems](http://www.bluesystem.ca/ice-penetrating-radar.html), DELORES, SEGY, [gprMAX](http://www.gprmax.com), Gecko, and legacy StoDeep files are supported. Available processing steps include various filtering operations, trivial modifications such as restacking, cropping, or reversing data, and a few different geolocation-related operations like interpolating to constant trace spacing. The integrated migration routines are in development but Stolt is working.
The primary interface to ImpDAR is through the command line, which allows efficient processing of large volumes of data. An API, centered around the RadarData class, is also available to allow the user to use ImpDAR in other programs.
In addition to processing, ImpDAR can also be used for interpreting the radargrams (i.e. picking layers). Picking is generally an interactive process, and there is a GUI for doing the picking; the GUI requires PyQt5, which may be annoying as a source build but is easy to install with Anaconda. The GUI also allows for basic processing steps, with the updates happening to the plot in real time. However, we have not added an 'undo' button to the GUI, so you are stuck with going back to your last saved version if you do not like the interactive results.
## Documentation
Documentation of the various processing steps is [here](https://impdar.readthedocs.io/en/latest/). There are examples of basic processing and plotting, and a longer example showing migration.
## Installation
Easiest is `pip install impdar`. Some explanation of other options is available in the main documentation, but PyPi will be updated with each release.
### Dependencies
#### Required
*Python 3* The package is tested on Python 3.7+. Older versions may work, but we have stopped testing on 2.7 since it has reached end of life. You can probably get 2.7 to work still, but no guarantees.
You also need:
[numpy](http://numpy.org),
[scipy](http://www.scipy.org),
[matplotlib](http://matplotlib.org),
[SegYIO](https://github.com/equinor/segyio/) for SEGY support and for SeisUnix migration, and
[h5py](https://h5py.org) is needed for some data formats.
I recommend just using Anaconda for your install, since it will also get you PyQt and therefore enable the GUI.
#### Recommended
[GDAL](http://gdal.org) is needed to reproject out of WGS84, and thus for proper distance measurement. Otherwise, distance calculations are going to be moderately or severely incorrect.
[PyQt5](https://pypi.org/project/PyQt5/) is needed to run the GUI, which is needed for picking. You can do everything from the command line, and plot the results with matplotlib, without PyQt5.
Depending on whether you need migration routines, there may be some external dependencies. ImpDAR is designed to interface with [SeisUnix](https://github.com/JohnWStockwellJr/SeisUnix), which contains a number of powerful migration routines. You need to install SeisUnix yourself and get it on your path. If you are running windows, you need to figure out how to use Cygwin as well. However, the pure python migration routines in ImpDAR can work quite well, so don't let the difficulty of installing these compiled routines stop you from using those. ImpDAR searches for SeisUnix at the time of the call to the migration routine, so you can always add this later if you find that you need it.
## Contributing
Support for different radar data types has primarily been added as needed--contributions for data readers for other systems, whether commercial or custom, are always welcome.
Platform: UNKNOWN
Description-Content-Type: text/markdown
+3
-1

@@ -11,2 +11,3 @@ README.md

impdar/bin/__init__.py
impdar/bin/apdar.py
impdar/bin/impdarexec.py

@@ -47,3 +48,3 @@ impdar/bin/imppick.py

impdar/lib/load/__init__.py
impdar/lib/load/load_UoA_mat.py
impdar/lib/load/load_UoA.py
impdar/lib/load/load_bsi.py

@@ -61,2 +62,3 @@ impdar/lib/load/load_delores.py

impdar/lib/load/load_stomat.py
impdar/lib/load/load_tek.py
impdar/lib/load/loading_utils.py

@@ -63,0 +65,0 @@ impdar/lib/migrationlib/__init__.py

@@ -41,2 +41,6 @@ #! /usr/bin/env python

help='Interpolate or delete bad GPS. Only used by BSI.')
parser_load.add_argument('-dname',
type=str,
help='Name of data field',
default='data')

@@ -43,0 +47,0 @@ # Options for processing data

@@ -393,3 +393,3 @@ #! /usr/bin/env python

if args.o is not None:
if len(radar_data) > 1:
if ((len(radar_data) > 1) or (args.o[-1] == '/')):
for d, f in zip(radar_data, args.fns):

@@ -396,0 +396,0 @@ bn = os.path.split(os.path.splitext(f)[0])[1]

@@ -5,3 +5,3 @@ #! /usr/bin/env python

#
# Copyright © 2019 David Lilien <dlilien90@gmail.com>
# Copyright © 2019 Benjamin Hills <bhills@uw.edu>
#

@@ -22,9 +22,11 @@ # Distributed under terms of the GNU GPL-3.0 license.

Sept 24 2019
"""
import os
import datetime
import datetime
import numpy as np
from scipy.io import loadmat
import h5py
from .ApresFlags import ApresFlags

@@ -64,11 +66,11 @@ from .ApresHeader import ApresHeader

'temperature2',
'battery_voltage']
'battery_voltage',
'Rcoarse']
# TODO: add imports
#from ._ApresDataProcessing import
#from ._ApresDataSaving import
from ._ApresDataProcessing import apres_range, phase_uncertainty, phase2range, range_diff, stacking
from ._ApresDataSaving import save
# Now make some load/save methods that will work with the matlab format
def __init__(self, fn_mat):
if fn_mat is None:
def __init__(self, fn):
if fn is None:
# Write these out so we can document them

@@ -78,3 +80,3 @@ # Very basics

self.cnum = None #: int, the number of chirps in a burst
self.bnum = None #: int, the number of bursts
self.bnum = None #: int, the number of bursts
self.data = None #: np.ndarray(snum x tnum) of the actual return power

@@ -101,2 +103,3 @@ self.dt = None #: float, The spacing between samples in travel time, in seconds

self.travel_time = None
self.Rcoarse = None

@@ -118,14 +121,24 @@ #: np.ndarray(tnum,) Optional. Projected x-coordinate along the profile.

# TODO: add a matlab load
mat = loadmat(fn_mat)
for attr in self.attrs_guaranteed:
if mat[attr].shape == (1, 1):
setattr(self, attr, mat[attr][0][0])
elif mat[attr].shape[0] == 1 or mat[attr].shape[1] == 1:
setattr(self, attr, mat[attr].flatten())
else:
setattr(self, attr, mat[attr])
# We may have some additional variables
for attr in self.attrs_optional:
if attr in mat:
if os.path.splitext(fn)[1] == '.h5':
with h5py.File(fn, 'r') as fin:
grp = fin['dat']
for attr in grp.keys():
if attr in ['ApresFlags', 'ApresHeader']:
continue
val = grp[attr][:]
if isinstance(val, h5py.Empty):
val = None
setattr(self, attr, val)
for attr in grp.attrs.keys():
val = grp.attrs[attr]
if isinstance(val, h5py.Empty):
val = None
setattr(self, attr, val)
self.flags = ApresFlags()
self.header = ApresHeader()
self.flags.read_h5(grp)
self.header.read_h5(grp)
else:
mat = loadmat(fn)
for attr in self.attrs_guaranteed:
if mat[attr].shape == (1, 1):

@@ -137,11 +150,22 @@ setattr(self, attr, mat[attr][0][0])

setattr(self, attr, mat[attr])
else:
setattr(self, attr, None)
# We may have some additional variables
for attr in self.attrs_optional:
if attr in mat:
if mat[attr].shape == (1, 1):
setattr(self, attr, mat[attr][0][0])
elif mat[attr].shape[0] == 1 or mat[attr].shape[1] == 1:
setattr(self, attr, mat[attr].flatten())
else:
setattr(self, attr, mat[attr])
else:
setattr(self, attr, None)
self.data_dtype = self.data.dtype
self.data_dtype = self.data.dtype
self.flags = ApresFlags()
self.flags.from_matlab(mat['flags'])
self.fn = fn_mat
self.flags = ApresFlags()
self.fn = fn
self.header = ApresHeader()
self.flags.from_matlab(mat['flags'])
self.header.from_matlab(mat['header'])
self.check_attrs()

@@ -167,9 +191,4 @@

for attr in self.attrs_optional:
if not hasattr(self, attr):
raise ImpdarError('{:s} is missing. \
It appears that this is an ill-defined RadarData object'.format(attr))
if not hasattr(self, 'data_dtype') or self.data_dtype is None:
self.data_dtype = self.data.dtype
self.data_dtype = self.shh.dtype
return

@@ -176,0 +195,0 @@

@@ -5,3 +5,3 @@ #! /usr/bin/env python

#
# Copyright © 2019 David Lilien <dlilien90@gmail.com>
# Copyright © 2019 Benjamin Hills <bhills@uw.edu>
#

@@ -20,3 +20,2 @@ # Distributed under terms of the GNU GPL3 license.

Sept 23 2019
"""

@@ -28,2 +27,3 @@

"""
Range conversion.

@@ -33,3 +33,3 @@ Parameters

self: class
data object
ApresData object
p: int

@@ -98,3 +98,3 @@ pad factor, level of interpolation for fft

# preallocate
# pre-allocate
spec = np.zeros((self.bnum,self.cnum,nf)).astype(np.cdouble)

@@ -121,2 +121,3 @@ spec_cor = np.zeros((self.bnum,self.cnum,nf)).astype(np.cdouble)

self.spec = spec.copy()
self.data_dtype = self.data.dtype

@@ -138,5 +139,4 @@ # precise range measurement

# --------------------------------------------------------------------------------------------
def phase_uncertainty(self):
def phase_uncertainty(self,bed_range):
"""

@@ -149,4 +149,6 @@ Calculate the phase uncertainty using a noise phasor.

---------
self: class
ApresData object
Output
Returns
--------

@@ -157,3 +159,2 @@ phase_uncertainty: array

uncertainty in the range (m) calculated from phase uncertainty
"""

@@ -164,6 +165,5 @@

# Get measured phasor from the data class, and use the median magnitude for noise phasor
meas_phasor = self.data
median_mag = np.nanmedian(abs(meas_phasor))
median_mag = np.nanmedian(abs(meas_phasor[:,:,np.argwhere(self.Rcoarse>bed_range)]))
# Noise phasor with random phase and magnitude equal to median of measured phasor

@@ -185,4 +185,2 @@ noise_phase = np.random.uniform(-np.pi,np.pi,np.shape(meas_phasor))

# --------------------------------------------------------------------------------------------
def phase2range(phi,lambdac,rc=None,K=None,ci=None):

@@ -205,3 +203,3 @@ """

Output
Returns
--------

@@ -225,3 +223,2 @@ r: float or array

# --------------------------------------------------------------------------------------------

@@ -235,3 +232,3 @@ def range_diff(self,acq1,acq2,win,step,Rcoarse=None,r_uncertainty=None,uncertainty='CR'):

self: class
data object
ApresData object
acq1: array

@@ -253,7 +250,7 @@ first acquisition for comparison

Output
Returns
--------
ds: array
depths at which the correlation coefficient is calculated
phase_diff: array
co: array
correlation coefficient between acquisitions

@@ -264,2 +261,4 @@ amplitude indicates how well reflection packets match between acquisitions

vertical motion in meters
range_diff_unc: array
uncertainty of vertical motion in meters
"""

@@ -309,3 +308,2 @@

# --------------------------------------------------------------------------------------------

@@ -318,2 +316,4 @@ def stacking(self,num_chirps=None):

---------
self: class
ApresData object
num_chirps: int

@@ -320,0 +320,0 @@ number of chirps to average over

@@ -5,5 +5,5 @@ #! /usr/bin/env python

#
# Copyright © 2019 dlilien <dlilien@berens>
# Copyright © 2019 Benjamin Hills <bhills@uw.edu>
#
# Distributed under terms of the MIT license.
# Distributed under terms of the GNU GPL3.0 license.

@@ -13,4 +13,7 @@ """

"""
import os
import numpy as np
import h5py
from scipy.io import savemat

@@ -20,3 +23,4 @@ from .ApresFlags import ApresFlags

def save_apres(self, fn):
def save(self, fn):
"""Save the radar data

@@ -26,3 +30,25 @@

----------
self: class
ApresData object
fn: str
Filename. Extension can be h5 or legacy mat.
"""
ext = os.path.splitext(fn)[1]
if ext in ['.h5', '.hdf5']:
return save_h5(self, fn)
elif ext == '.mat':
return save_mat(self, fn)
else:
raise ValueError('File extension choices are .h5 and .mat (legacy)')
def save_mat(self, fn):
"""Save the radar data as an ImpDAR .mat file
Parameters
----------
self: class
ApresData object
fn: str
Filename. Should have a .mat extension

@@ -48,24 +74,97 @@ """

if self.header is not None:
mat['header'] = self.header.to_matlab()
else:
# We want the structure available to prevent read errors from corrupt files
mat['header'] = ApresHeader().to_matlab()
if 'header' in vars(self):
if self.header is not None:
mat['header'] = self.header.to_matlab()
else:
# We want the structure available to prevent read errors from corrupt files
mat['header'] = ApresHeader().to_matlab()
# Make sure not to expand the size of the data due to type conversion
if hasattr(self, 'data_dtype') and self.data_dtype is not None and self.data_dtype != mat['data'].dtype:
# Be carefuly of obliterating NaNs
# We will use singles instead of ints for this guess
if (self.data_dtype in [int, np.int8, np.int16]) and np.any(np.isnan(mat['data'])):
print('Warning: new file is float16 rather than ', self.data_dtype, ' since we now have NaNs')
mat['data'] = mat['data'].astype(np.float16)
elif (self.data_dtype in [np.int32]) and np.any(np.isnan(mat['data'])):
print('Warning: new file is float32 rather than ', self.data_dtype, ' since we now have NaNs')
mat['data'] = mat['data'].astype(np.float32)
elif (self.data_dtype in [np.int64]) and np.any(np.isnan(mat['data'])):
print('Warning: new file is float64 rather than ', self.data_dtype, ' since we now have NaNs')
mat['data'] = mat['data'].astype(np.float64)
else:
mat['data'] = mat['data'].astype(self.data_dtype)
# Make sure not to expand the size of the data due to type conversion
if hasattr(self, 'data_dtype') and self.data_dtype is not None and self.data_dtype != mat['data'].dtype:
# Be carefuly of obliterating NaNs
# We will use singles instead of ints for this guess
if (self.data_dtype in [int, np.int8, np.int16]) and np.any(np.isnan(mat['data'])):
print('Warning: new file is float16 rather than ', self.data_dtype, ' since we now have NaNs')
mat['data'] = mat['data'].astype(np.float16)
elif (self.data_dtype in [np.int32]) and np.any(np.isnan(mat['data'])):
print('Warning: new file is float32 rather than ', self.data_dtype, ' since we now have NaNs')
mat['data'] = mat['data'].astype(np.float32)
elif (self.data_dtype in [np.int64]) and np.any(np.isnan(mat['data'])):
print('Warning: new file is float64 rather than ', self.data_dtype, ' since we now have NaNs')
mat['data'] = mat['data'].astype(np.float64)
else:
mat['data'] = mat['data'].astype(self.data_dtype)
savemat(fn, mat)
def save_h5(self, fn):
"""Save the radar data as an h5 file
Parameters
----------
self: class
ApresData object
fn: str
Filename. Should have a .h5 extension
"""
with h5py.File(fn, 'w') as f:
save_as_h5_group(self, f, 'dat')
def save_as_h5_group(self, h5_file_descriptor, groupname='dat'):
"""Save to a group in h5 file (useful to group multiple datasets to one file
Parameters
----------
self: class
ApresData object
h5_file_descriptor: open hdf5 file
The file object to write to. Can also be a group (if data should be a subgroup).
groupname: str, optional
The name this (sub)group should have.
"""
grp = h5_file_descriptor.create_group(groupname)
for attr in self.attrs_guaranteed:
val = getattr(self, attr)
if val is not None:
if hasattr(val, 'shape') and np.any([s != 1 for s in val.shape]):
if val.dtype == 'O':
if hasattr(self, 'data_dtype') and self.data_dtype is not None:
dtype = self.data_dtype
else:
dtype = 'f'
else:
dtype = val.dtype
grp.create_dataset(attr, data=val, dtype=dtype)
else:
grp.attrs.create(attr, val)
else:
grp.attrs[attr] = h5py.Empty('f')
for attr in self.attrs_optional:
if hasattr(self, attr) and getattr(self, attr) is not None:
val = getattr(self, attr)
if hasattr(val, 'shape') and np.any([s != 1 for s in val.shape]):
if val.dtype == 'O':
if hasattr(self, 'data_dtype') and self.data_dtype is not None:
dtype = self.data_dtype
else:
dtype = 'f'
else:
# override the dtype for data
if attr == 'data':
dtype = self.data_dtype
dtype = val.dtype
grp.create_dataset(attr, data=val, dtype=dtype)
else:
grp.attrs.create(attr, val)
if self.flags is not None:
self.flags.write_h5(grp)
else:
ApresFlags().write_h5(grp)
if self.header is not None:
self.header.write_h5(grp)
else:
ApresHeader().write_h5(grp)

@@ -5,3 +5,3 @@ #! /usr/bin/env python

#
# Copyright © 2019 David Lilien <dlilien90@gmail.com>
# Copyright © 2019 Benjamin Hills <bhills@uw.edu>
#

@@ -15,2 +15,3 @@ # Distributed under terms of the GNU GPL3.0 license.

import numpy as np
import h5py

@@ -26,18 +27,6 @@ class ApresFlags():

Legacy indication of whether we are batch processing. Always False.
agc: bool
Automatic gain control has been applied.
reverse: bool
Data have been reversed.
restack: bool
Data have been restacked.
rgain: bool
Data have a linear range gain applied.
bpass: 3x1 :class:`numpy.ndarray`
Elements: (1) 1 if bandpassed; (2) Low; and (3) High (MHz) bounds
hfilt: 2x1 :class:`numpy.ndarray`
Elements: (1) 1 if horizontally filtered; (2) Filter type
interp: 2x1 :class:`numpy.ndarray`
Elements: (1) 1 if constant distance spacing applied (2) The constant spacing (m)
mig: 2x1 :class: String
None if no migration done, mtype if migration done.
range: float
max range
stack: int
number of chirps stacked
"""

@@ -49,9 +38,35 @@

self.stack = 1
self.attrs = ['file_read_code','phase2range','stack']
self.attr_dims = [None,None,None]
self.attrs = ['file_read_code', 'range', 'stack']
self.attr_dims = [None, None, None]
def write_h5(self, grp):
"""Write to a subgroup in hdf5 file
Parameters
----------
grp: h5py.Group
The group to which the ApresFlags subgroup is written
"""
subgrp = grp.create_group('ApresFlags')
for attr in self.attrs:
val = getattr(self, attr)
if val is None:
subgrp[attr] = h5py.Empty('f')
else:
if hasattr(val, 'dtype'):
val = val.astype('f')
subgrp.attrs[attr] = val
def read_h5(self, grp):
subgrp = grp['ApresFlags']
for attr in subgrp.attrs.keys():
val = subgrp.attrs[attr]
if isinstance(val, h5py.Empty):
val = None
setattr(self, attr, val)
def to_matlab(self):
"""Convert all associated attributes into a dictionary formatted for use with :func:`scipy.io.savemat`
"""
outmat = {att: getattr(self, att) for att in self.attrs}
outmat = {att: (getattr(self, att) if getattr(self, att) is not None else np.NaN) for att in self.attrs}
return outmat

@@ -58,0 +73,0 @@

@@ -5,3 +5,3 @@ #! /usr/bin/env python

#
# Copyright © 2019 David Lilien <dlilien90@gmail.com>
# Copyright © 2019 Benjamin Hills <bhills@uw.edu>
#

@@ -25,9 +25,8 @@ # Distributed under terms of the GNU GPL3 license.

Sept 23 2019
"""
import numpy as np
import h5py
import re
# --------------------------------------------------------------------------------------------

@@ -59,3 +58,25 @@ class ApresHeader():

self.ramp_dir = None
self.f1 = None
self.bandwidth = None
self.fc = None
self.er = None
self.ci = None
self.lambdac = None
self.n_attenuators = None
self.attenuator1 = None
self.attenuator2 = None
self.tx_ant = None
self.rx_ant = None
self.attrs = ['fsysclk','fs','fn','header_string','file_format','noDwellHigh','noDwellLow',
'f0','f_stop','ramp_up_step','ramp_down_step','tstep_up','tstep_down','snum','nsteps_DDS',
'chirp_length','chirp_grad','nchirp_samples','ramp_dir','f1','bandwidth','fc','er','ci','lambdac',
'n_attenuators','attenuator1','attenuator2','tx_ant','rx_ant']
self.attr_dims = ['none','none','none','none','none',
'none','none','none','none','none',
'none','none','none','none','none',
'none','none','none','none','none',
'none','none','none','none','none',
'none','none','none','none','none']
# --------------------------------------------------------------------------------------------

@@ -73,5 +94,2 @@

maximum length of header to read (can be too long)
Output
---------
"""

@@ -83,4 +101,2 @@ self.fn = fn_apres

# --------------------------------------------------------------------------------------------
def get_file_format(self):

@@ -112,3 +128,2 @@ """

### Original Matlab Notes ###

@@ -159,7 +174,2 @@ Extract from the hex codes the actual paramaters used by RMB2

#elif case == 'Reg08':
# # Phase offset word Register (POW) Address 0x08. 2 Bytes dTheta = 360*POW/2^16.
# val = char(reg{1,2}(k));
# H.phaseOffsetDeg = hex2dec(val(1:4))*360/2^16;
elif case == 'Reg0B':

@@ -201,6 +211,6 @@ # Digital Ramp Limit Register Address 0x0B

self.fs = output[0]
if self.fs == 1: # if self.fs > 70e3:
self.fs = 8e4 # self.fs = 80e3
else: # else
self.fs = 4e4 # self.fs = 40e3
if self.fs == 1:
self.fs = 8e4
else:
self.fs = 4e4

@@ -228,8 +238,33 @@ self.snum = int(output[1])

# --------------------------------------------------------------------------------------------
def write_h5(self, grp):
"""
Write to a subgroup in hdf5 file
Parameters
----------
grp: h5py.Group
The group to which the ApresHeader subgroup is written
"""
subgrp = grp.create_group('ApresHeader')
for attr in vars(self):
val = getattr(self, attr)
if val is None:
subgrp.attrs[attr] = h5py.Empty("f")
else:
if hasattr(val, 'dtype'):
val = val.astype('f')
subgrp.attrs[attr] = val
def read_h5(self, grp):
subgrp = grp['ApresHeader']
for attr in subgrp.attrs.keys():
val = subgrp.attrs[attr]
if isinstance(val, h5py.Empty):
val = None
setattr(self, attr, val)
def to_matlab(self):
"""Convert all associated attributes into a dictionary formatted for use with :func:`scipy.io.savemat`
"""
outmat = {att: getattr(self, att) for att in vars(self)}
outmat = {att: (getattr(self, att) if getattr(self, att) is not None else np.NaN) for att in vars(self)}
return outmat

@@ -244,3 +279,3 @@

# were lazily appended to be arrays, but we preallocate
if attr_dim is not None and getattr(self, attr).shape[0] == 1:
if attr_dim != 'none' and getattr(self, attr).shape[0] == 1:
setattr(self, attr, np.zeros((attr_dim, )))

@@ -247,0 +282,0 @@

@@ -24,13 +24,16 @@ #! /usr/bin/env python

Sept 23 2019
"""
import os
import numpy as np
from scipy.io import loadmat
import datetime
import re
from . import ApresData
from ..ImpdarError import ImpdarError
# -----------------------------------------------------------------------------------------------------
def load_apres(fns_apres,burst=1,fs=40000, *args, **kwargs):
def load_apres(fns_apres, burst=1, fs=40000, *args, **kwargs):
"""Load and concatenate all apres data from several files

@@ -45,3 +48,3 @@

-------
RadarData
out
A single, concatenated output.

@@ -53,4 +56,5 @@ """

try:
apres_data.append(load_apres_single_file(fn,burst=burst,fs=fs,*args,**kwargs))
except:
apres_data.append(load_apres_single_file(
fn, burst=burst, fs=fs, *args, **kwargs))
except ImpdarError:
Warning('Cannot load file: '+fn)

@@ -61,12 +65,14 @@

for dat in apres_data[1:]:
if out.snum != dat.snum:
raise ValueError('Need the same number of vertical samples in each file')
if out.cnum != dat.cnum:
raise ValueError('Need the same number of chirps in each file')
if not np.all(out.travel_time == dat.travel_time):
raise ValueError('Need matching travel time vectors')
if not np.all(out.frequencies == dat.frequencies):
raise ValueError('Need matching frequency vectors')
if len(apres_data)>1:
for dat in apres_data[1:]:
if out.snum != dat.snum:
raise ValueError('Need the same number of vertical samples in each file')
if out.cnum != dat.cnum:
raise ValueError('Need the same number of chirps in each file')
if not np.all(out.travel_time == dat.travel_time):
raise ValueError('Need matching travel time vectors')
if not np.all(out.frequencies == dat.frequencies):
raise ValueError('Need matching frequency vectors')
out.data = np.vstack([[dat.data] for dat in apres_data])

@@ -76,6 +82,6 @@ out.chirp_num = np.vstack([[dat.chirp_num] for dat in apres_data])

out.chirp_time = np.vstack([[dat.chirp_time] for dat in apres_data])
out.time_stamp = np.hstack([dat.time_stamp for dat in apres_data])
out.temperature1 = np.hstack([dat.temperature1 for dat in apres_data])
out.temperature2 = np.hstack([dat.temperature2 for dat in apres_data])
out.battery_voltage = np.hstack([dat.battery_voltage for dat in apres_data])
out.battery_voltage = np.hstack(
[dat.battery_voltage for dat in apres_data])
out.bnum = np.shape(out.data)[0]

@@ -86,3 +92,3 @@

def load_apres_single_file(fn_apres,burst=1,fs=40000, *args, **kwargs):
def load_apres_single_file(fn_apres, burst=1, fs=40000, *args, **kwargs):
"""

@@ -101,2 +107,8 @@ Load ApRES data

Returns
---------
ApresData: class
data object
### Original Matlab Notes ###

@@ -116,10 +128,27 @@

## Load data and reshape array
if fn_apres[-4:] == '.mat':
# TODO: fix this in the __init__ file
apres_data = ApresData(fn_apres)
# We can be raw, impdar mat, BAS mat, or impdar h5
ext = os.path.splitext(fn_apres)[1]
if ext == '.mat':
dat = loadmat(fn_apres)
# The old bas script uses vdat as the main struct
if 'vdat' in dat:
impdar_format = False
else:
impdar_format = True
dat = None
if impdar_format:
apres_data = ApresData(fn_apres)
else:
apres_data = load_BAS_mat(fn_apres)
return apres_data
elif ext == '.h5':
return ApresData(fn_apres)
else:
# Load data and reshape array
apres_data = ApresData(None)
apres_data.header.update_parameters(fn_apres)
start_ind,end_ind = load_burst(apres_data, burst, fs)
start_ind, end_ind = load_burst(apres_data, burst, fs)

@@ -130,5 +159,6 @@ # Extract just good chirp data from voltage record and rearrange into

# of sync tone etc which occur after each 40K of chirp.
AttSet = apres_data.header.attenuator1 + 1j*apres_data.header.attenuator2 # unique code for attenuator setting
AttSet = apres_data.header.attenuator1 + 1j * \
apres_data.header.attenuator2 # unique code for attenuator setting
## Add metadata to structure
# Add metadata to structure

@@ -143,26 +173,32 @@ # Sampling parameters

else:
apres_data.header.f1 = apres_data.header.f0 + apres_data.header.chirp_length * apres_data.header.chirp_grad/2./np.pi
apres_data.header.bandwidth = apres_data.header.chirp_length * apres_data.header.chirp_grad/2/np.pi
apres_data.header.f1 = apres_data.header.f0 + \
apres_data.header.chirp_length * apres_data.header.chirp_grad/2./np.pi
apres_data.header.bandwidth = apres_data.header.chirp_length * \
apres_data.header.chirp_grad/2/np.pi
apres_data.header.fc = apres_data.header.f0 + apres_data.header.bandwidth/2.
apres_data.dt = 1./apres_data.header.fs
apres_data.header.er = 3.18
apres_data.header.ci = 3e8/np.sqrt(apres_data.header.er);
apres_data.header.lambdac = apres_data.header.ci/apres_data.header.fc;
apres_data.header.ci = 3e8/np.sqrt(apres_data.header.er)
apres_data.header.lambdac = apres_data.header.ci/apres_data.header.fc
# Load each chirp into a row
data_load = np.zeros((apres_data.cnum,apres_data.snum)) # preallocate array
# preallocate array
data_load = np.zeros((apres_data.cnum, apres_data.snum))
apres_data.chirp_num = np.arange(apres_data.cnum)
apres_data.chirp_att = np.zeros((apres_data.cnum)).astype(np.cdouble)
apres_data.chirp_att = np.zeros(
(apres_data.cnum)).astype(np.cdouble)
apres_data.chirp_time = np.zeros((apres_data.cnum))
chirp_interval = 1.6384/(24.*3600.); # days TODO: why is this assigned directly?
apres_data.chirp_interval = 1.6384/(24.*3600.) # in days; apparently this is always the interval?
for chirp in range(apres_data.cnum):
data_load[chirp,:] = apres_data.data[start_ind[chirp]:end_ind[chirp]]
apres_data.chirp_att[chirp] = AttSet[chirp//apres_data.cnum] # attenuator setting for chirp
apres_data.chirp_time[chirp] = apres_data.decday + chirp_interval*(chirp-1) # time of chirp
data_load[chirp, :] = apres_data.data[start_ind[chirp]: end_ind[chirp]]
# attenuator setting for chirp
apres_data.chirp_att[chirp] = AttSet[chirp//apres_data.cnum]
apres_data.chirp_time[chirp] = apres_data.decday + apres_data.chirp_interval*chirp
apres_data.data = data_load
# Create time and frequency stamp for samples
apres_data.travel_time = apres_data.dt*np.arange(apres_data.snum) # sampling times (rel to first)
apres_data.frequencies = apres_data.header.f0 + apres_data.travel_time*apres_data.header.chirp_grad/(2.*np.pi)
apres_data.travel_time *= 1e6
# sampling times (rel to first)
apres_data.travel_time = apres_data.dt * np.arange(apres_data.snum)
apres_data.frequencies = apres_data.header.f0 + apres_data.travel_time * apres_data.header.chirp_grad/(2.*np.pi)
apres_data.travel_time *= 1.0e6

@@ -173,5 +209,4 @@ apres_data.data_dtype = apres_data.data.dtype

# -----------------------------------------------------------------------------------------------------
def load_burst(self,burst=1,fs=40000,max_header_len=2000,burst_pointer=0):
def load_burst(self, burst=1, fs=40000, max_header_len=2000, burst_pointer=0):
"""

@@ -192,5 +227,10 @@ Load bursts from the apres acquisition.

Output
Returns
---------
start_ind: int
data index for given burst
end_ind: int
data index for given burst
### Original Matlab Script Notes ###

@@ -210,10 +250,10 @@ Read FMCW data file from after Oct 2014 (RMB2b + VAB Iss C, SW Issue >= 101)

try:
fid = open(self.header.fn,'rb')
except:
fid = open(self.header.fn, 'rb')
except FileNotFoundError:
# Unknown file
self.flags.file_read_code = 'Unable to read file' + self.header.fn
raise TypeError('Cannot open file', self.header.fn)
raise ImpdarError('Cannot open file', self.header.fn)
# Get the total length of the file
fid.seek(0,2)
fid.seek(0, 2)
file_len = fid.tell()

@@ -226,14 +266,16 @@ burst_count = 1

fid.seek(burst_pointer)
self.header.read_header(self.header.fn,max_header_len)
self.header.read_header(self.header.fn, max_header_len)
try:
# Read header values
strings = ['N_ADC_SAMPLES=','NSubBursts=','Average=','nAttenuators=','Attenuator1=',
'AFGain=','TxAnt=','RxAnt=']
strings = ['N_ADC_SAMPLES=', 'NSubBursts=', 'Average=', 'nAttenuators=', 'Attenuator1=',
'AFGain=', 'TxAnt=', 'RxAnt=']
output = np.empty((len(strings))).astype(str)
for i,string in enumerate(strings):
for i, string in enumerate(strings):
if string in self.header.header_string:
search_start = self.header.header_string.find(string)
search_end = self.header.header_string[search_start:].find('\\')
output[i] = self.header.header_string[search_start+len(string):search_end+search_start]
search_end = self.header.header_string[search_start:].find(
'\\')
output[i] = self.header.header_string[search_start + len(string):
search_end+search_start]

@@ -245,9 +287,11 @@ # Write header values to data object

self.header.n_attenuators = int(output[3])
self.header.attenuator1 = np.array(output[4].split(',')).astype(int)[:self.header.n_attenuators]
self.header.attenuator2 = np.array(output[5].split(',')).astype(int)[:self.header.n_attenuators]
self.header.attenuator1 = np.array(output[4].split(',')).astype(int)[
:self.header.n_attenuators]
self.header.attenuator2 = np.array(output[5].split(',')).astype(int)[
:self.header.n_attenuators]
self.header.tx_ant = np.array(output[6].split(',')).astype(int)
self.header.rx_ant = np.array(output[7].split(',')).astype(int)
self.header.tx_ant = self.header.tx_ant[self.header.tx_ant==1]
self.header.rx_ant = self.header.rx_ant[self.header.rx_ant==1]
self.header.tx_ant = self.header.tx_ant[self.header.tx_ant == 1]
self.header.rx_ant = self.header.rx_ant[self.header.rx_ant == 1]

@@ -257,4 +301,4 @@ if self.average != 0:

else:
self.cnum = self.n_subbursts*len(self.header.tx_ant)*\
len(self.header.rx_ant)*self.header.n_attenuators
self.cnum = self.n_subbursts*len(self.header.tx_ant) *\
len(self.header.rx_ant)*self.header.n_attenuators

@@ -266,7 +310,8 @@ # End of burst

except:
except ValueError:
# If the burst read is unsuccessful exit with an updated read code
self.flags.file_read_code = 'Corrupt header in burst' + str(burst_count) + 'for file' + self.header.fn
self.flags.file_read_code = 'Corrupt header in burst' + \
str(burst_count) + 'for file' + self.header.fn
self.bnum = burst_count
raise TypeError('Burst Read Failed.')
raise ImpdarError('Burst Read Failed.')

@@ -285,18 +330,24 @@ # Move the burst pointer

# Look for a few different strings and save output
strings = ['Time stamp=','Temp1=','Temp2=','BatteryVoltage=']
strings = ['Time stamp=', 'Temp1=', 'Temp2=', 'BatteryVoltage=']
output = []
for i,string in enumerate(strings):
for i, string in enumerate(strings):
if string in self.header.header_string:
search_start = [m.start() for m in re.finditer(string, self.header.header_string)]
search_end = [self.header.header_string[ind:].find('\\') for ind in search_start]
out = [self.header.header_string[search_start[i]+len(string):search_end[i]+search_start[i]] for i in range(len(search_start))]
search_start = [m.start() for m in re.finditer(
string, self.header.header_string)]
search_end = [self.header.header_string[ind:].find(
'\\') for ind in search_start]
out = [self.header.header_string[search_start[i] + len(string):
search_end[i]+search_start[i]]
for i in range(len(search_start))]
output.append(out)
if 'Time stamp' not in self.header.header_string:
self.flags.file_read_code = 'Burst' + str(self.bnum) + 'not found in file' + self.header.fn
self.flags.file_read_code = 'Burst' + \
str(self.bnum) + 'not found in file' + self.header.fn
else:
self.time_stamp = np.array([datetime.datetime.strptime(str_time, '%Y-%m-%d %H:%M:%S') for str_time in output[0]])
self.time_stamp = np.array([datetime.datetime.strptime(
str_time, '%Y-%m-%d %H:%M:%S') for str_time in output[0]])
timezero = datetime.datetime(1, 1, 1, 0, 0, 0)
day_offset = self.time_stamp - timezero
self.decday = np.array([offset.days for offset in day_offset]) + 377. # Matlab compatable
self.decday = np.array([offset.days + offset.seconds/86400. for offset in day_offset]) + 366. # Matlab compatable

@@ -317,19 +368,23 @@ self.temperature1 = np.array(output[1]).astype(float)

# too few bursts in file
self.flags.file_read_code = 'Burst' + str(self.bnum) + 'not found in file' + self.header.fn
self.flags.file_read_code = 'Burst' + \
str(self.bnum) + 'not found in file' + self.header.fn
self.bnum = burst_count - 1
raise TypeError('Burst ' + str(self.bnum) + ' not found in file ' + self.header.fn)
raise ValueError('Burst {:d} not found in file {:s}'.format(self.bnum, self.header.fn))
else:
# TODO: Check the other readers for average == 1 or average == 2
if self.average == 2:
self.data = np.fromfile(fid,dtype='uint32',count=self.cnum*self.snum)
self.data = np.fromfile(
fid, dtype='uint32', count=self.cnum*self.snum)
elif self.average == 1:
fid.seek(burst_pointer+1)
self.data = np.fromfile(fid,dtype='float4',count=self.cnum*self.snum)
self.data = np.fromfile(
fid, dtype='float4', count=self.cnum*self.snum)
else:
self.data = np.fromfile(fid,dtype='uint16',count=self.cnum*self.snum)
self.data = np.fromfile(
fid, dtype='uint16', count=self.cnum*self.snum)
if fid.tell()-(burst_pointer-1) < self.cnum*self.snum:
self.flags.file_read_code = 'Corrupt header in burst' + str(burst_count) + 'for file' + self.header.fn
self.flags.file_read_code = 'Corrupt header in burst' + \
str(burst_count) + 'for file' + self.header.fn
self.data[self.data<0] = self.data[self.data<0] + 2**16.
self.data[self.data < 0] = self.data[self.data < 0] + 2**16.
self.data = self.data.astype(float) * 2.5/2**16.

@@ -340,3 +395,3 @@

start_ind = np.transpose(np.arange(0,self.snum*self.cnum,self.snum))
start_ind = np.transpose(np.arange(0, self.snum*self.cnum, self.snum))
end_ind = start_ind + self.snum

@@ -348,8 +403,51 @@ self.bnum = burst

# Clean temperature record (wrong data type?)
self.temperature1[self.temperature1>300] -= 512
self.temperature2[self.temperature2>300] -= 512
self.temperature1[self.temperature1 > 300] -= 512
self.temperature2[self.temperature2 > 300] -= 512
self.flags.file_read_code = 'Successful Read'
return start_ind,end_ind
return start_ind, end_ind
def load_BAS_mat(fn, chirp_interval=1.6384/(24.*3600.)):
"""Load apres data from a mat file saved by software from the British Antarctic Survey.
Parameters
----------
fn: str
Matlab file name for ApresData
chirp_interval: float
set by default
Returns
-------
apres_data: class
ImpDAR data object
"""
mat = loadmat(fn)
apres_data = ApresData(None)
apres_data.header.fs = mat['vdat'][0]['fs'][0][0][0]
apres_data.header.attenuator1 = mat['vdat'][0]['Attenuator_1'][0][0][0]
apres_data.header.attenuator2 = mat['vdat'][0]['Attenuator_2'][0][0][0]
apres_data.snum = mat['vdat'][0]['Nsamples'][0][0][0]
apres_data.cnum = mat['vdat'][0]['chirpNum'][0][0][0]
apres_data.bnum = mat['vdat'][0]['Burst'][0][0][0]
apres_data.n_subbursts = mat['vdat'][0]['SubBurstsInBurst'][0][0][0]
apres_data.average = mat['vdat'][0]['Average'][0][0][0]
apres_data.data = mat['vdat'][0]['v'][0].T
apres_data.travel_time = mat['vdat'][0]['t'][0].T
apres_data.frequencies = mat['vdat'][0]['f'][0].T
apres_data.dt = 1.0 / apres_data.header.fs
apres_data.chirp_num = np.arange(apres_data.cnum) + 1
apres_data.chirp_att = mat['vdat'][0]['chirpAtt'][0]
apres_data.decday = mat['vdat'][0]['TimeStamp'][0][0][0]
apres_data.chirp_interval = chirp_interval
apres_data.chirp_time = apres_data.decday + apres_data.chirp_interval * np.arange(0.0, apres_data.cnum, 1.0)
apres_data.check_attrs()
return apres_data

@@ -345,3 +345,3 @@ #! /usr/bin/env python

def kinematic_gps_control(dats, lat, lon, elev, decday, offset=0.0,
extrapolate=False, guess_offset=True):
extrapolate=False, guess_offset=True, old_gps_gaps=False):
"""Use new, better GPS data for lat, lon, and elevation.

@@ -381,2 +381,3 @@

the offset. Else we look at +/- 0.001 days
"""

@@ -397,20 +398,28 @@ if extrapolate:

print('CC search')
for i in range(5):
for j, dat in enumerate(dats):
for j, dat in enumerate(dats):
decday_interp = dat.decday.copy()
if old_gps_gaps:
for i,dday in enumerate(decday_interp):
if np.min(abs(dday-decday))>1./(24*3600.):
decday_interp[i] = np.nan
dat.lat[dat.lat == 0.] = np.nan
dat.long[dat.long == 0.] = np.nan
for i in range(5):
if (min(lon % 360) - max(dat.long % 360)) > 0. or (min(dat.long % 360) - max(lon % 360)) > 0.:
raise ValueError('No overlap in longitudes')
if offsets[j] != 0.0:
search_vals = np.linspace(-0.1 * abs(offsets[j]),
0.1 * abs(offsets[j]),
1001)
0.1 * abs(offsets[j]),1001)
else:
search_vals = np.linspace(-0.1, 0.1, 5001)
cc_coeffs = [np.corrcoef(
interp1d(decday + offsets[j] + inc_offset, lat,
kind='linear', fill_value=fill_value)(dat.decday),
dat.lat)[1, 1] + np.corrcoef(
interp1d(decday + offsets[j] + inc_offset, lon % 360,
kind='linear', fill_value=fill_value)(
dat.decday), dat.long % 360)[0, 1]
for inc_offset in search_vals]
cc_coeffs = np.zeros_like(search_vals)
for i_search,inc_offset in enumerate(search_vals):
lat_interp = interp1d(decday + inc_offset + offsets[j], lat, kind='linear', fill_value=fill_value)(decday_interp)
long_interp = interp1d(decday + inc_offset + offsets[j], lon%360, kind='linear', fill_value=fill_value)(decday_interp)
idxs_lat = ~np.isnan(lat_interp) & ~np.isnan(dat.lat)
idxs_long = ~np.isnan(long_interp) & ~np.isnan(dat.long)
cc_coeffs[i_search] = np.corrcoef(lat_interp[idxs_lat], dat.lat[idxs_lat])[0, 1] + \
np.corrcoef(long_interp[idxs_long], dat.long[idxs_long] % 360)[0, 1]
offsets[j] += search_vals[np.argmax(cc_coeffs)]

@@ -420,16 +429,28 @@ print('Maximum correlation at offset: {:f}'.format(offsets[j]))

for j, dat in enumerate(dats):
int_lat = interp1d(decday + offsets[j],
decday_interp = dat.decday.copy()
lat_interpolator = interp1d(decday + offsets[j],
lat,
kind='linear',
fill_value=fill_value)
int_long = interp1d(decday + offsets[j],
long_interpolator = interp1d(decday + offsets[j],
lon % 360, kind='linear',
fill_value=fill_value)
int_elev = interp1d(decday + offsets[j],
elev_interpolator = interp1d(decday + offsets[j],
elev,
kind='linear',
fill_value=fill_value)
dat.lat = int_lat(dat.decday)
dat.long = int_long(dat.decday)
dat.elev = int_elev(dat.decday)
if old_gps_gaps:
lat_interp = lat_interpolator(decday_interp)
long_interp = long_interpolator(decday_interp)
elev_interp = elev_interpolator(decday_interp)
lat_interp[np.isnan(decday_interp)] = dat.lat[np.isnan(decday_interp)]
long_interp[np.isnan(decday_interp)] = dat.long[np.isnan(decday_interp)]
elev_interp[np.isnan(decday_interp)] = dat.elev[np.isnan(decday_interp)]
dat.lat = lat_interp
dat.long = long_interp%360
dat.elev = elev_interp
else:
dat.lat = lat_interpolator(decday_interp)
dat.long = long_interpolator(decday_interp)
dat.elev = elev_interpolator(decday_interp)
if conversions_enabled:

@@ -440,3 +461,3 @@ dat.get_projected_coords()

def kinematic_gps_mat(dats, mat_fn, offset=0.0, extrapolate=False,
guess_offset=False):
guess_offset=False, old_gps_gaps=False):
"""Use a matlab file with gps info to redo radar GPS.

@@ -471,7 +492,7 @@

offset=offset, extrapolate=extrapolate,
guess_offset=guess_offset)
guess_offset=guess_offset, old_gps_gaps=old_gps_gaps)
def kinematic_gps_csv(dats, csv_fn, offset=0, names='decday,long,lat,elev',
extrapolate=False, guess_offset=False,
extrapolate=False, guess_offset=False, old_gps_gaps=False,
**genfromtxt_flags):

@@ -516,3 +537,4 @@ """Use a csv gps file to redo the GPS on radar data.

extrapolate=extrapolate,
guess_offset=guess_offset)
guess_offset=guess_offset,
old_gps_gaps=old_gps_gaps)

@@ -519,0 +541,0 @@

@@ -16,4 +16,4 @@ #! /usr/bin/env python

from . import load_mcords # needs to be imported first and alone due to opaque h5py/netcdf4 error
from . import load_gssi, load_pulse_ekko, load_gprMax, load_olaf, load_segy, load_UoA_mat
from . import load_delores, load_osu, load_stomat, load_ramac, load_bsi
from . import load_gssi, load_pulse_ekko, load_gprMax, load_olaf, load_segy, load_UoA
from . import load_delores, load_osu, load_stomat, load_ramac, load_bsi, load_tek
from ..RadarData import RadarData

@@ -24,4 +24,5 @@

# to figure out what the available options are
FILETYPE_OPTIONS = ['mat', 'pe', 'gssi','stomat', 'gprMax', 'gecko', 'segy',
'mcords_mat', 'mcords_nc', 'UoA_mat', 'ramac', 'bsi', 'delores', 'osu', 'ramac']
FILETYPE_OPTIONS = ['mat', 'pe', 'gssi', 'stomat', 'gprMax', 'gecko', 'segy', 'mcords_mat',
'mcords_nc', 'UoA_mat', 'UoA_h5', 'ramac', 'bsi', 'delores', 'osu',
'ramac', 'tek']

@@ -57,5 +58,5 @@

bn_pe = os.path.splitext(fn)[0]
print(fn,'is a Pulse Ekko project file.\n'+\
'We will partition it into the respective data and header files at ./'+\
bn_pe)
print(fn, 'is a Pulse Ekko project file.\n' +
'We will partition it into the respective data and header files at ./' +
bn_pe)
if not os.path.isdir(bn_pe):

@@ -77,7 +78,7 @@ os.mkdir(bn_pe)

except IOError:
print('Could not load ',fn, 'as a Pulse Ekko file.')
print('Could not load ', fn, 'as a Pulse Ekko file.')
elif filetype == 'mat':
dat = [RadarData(fn) for fn in fns_in]
elif filetype == 'stomat':
dat = [load_stomat.load_stomat(fn) for fn in fns_in]
dat = [load_stomat.load_stomat(fn, **kwargs) for fn in fns_in]
elif filetype == 'gprMax':

@@ -118,4 +119,4 @@ if load_gprMax.H5:

dat = [load_mcords.load_mcords_mat(fn) for fn in fns_in]
elif filetype == 'UoA_mat':
if load_UoA_mat.H5:
elif filetype in ['UoA_mat', 'UoA_h5']:
if load_UoA.H5:
if 'gps_offset' in kwargs:

@@ -125,5 +126,10 @@ gps_offset = kwargs['gps_offset']

gps_offset = 0.0
dat = [load_UoA_mat.load_UoA_mat(fn, gps_offset=gps_offset) for fn in fns_in]
if filetype == 'UoA_mat':
dat = [load_UoA.load_UoA_mat(fn, gps_offset=gps_offset) for fn in fns_in]
elif filetype == 'UoA_h5':
dat = []
for fn in fns_in:
dat += load_UoA.load_UoA_h5(fn, gps_offset=gps_offset)
else:
raise ImportError('You need h5py for UoA_mat')
raise ImportError('You need h5py for UoA')
elif filetype == 'delores':

@@ -135,2 +141,4 @@ dat = [load_delores.load_delores(fn, channel=channel) for fn in fns_in]

dat = [load_ramac.load_ramac(fn) for fn in fns_in]
elif filetype == 'tek':
dat = [load_tek.load_tek(fn) for fn in fns_in]
else:

@@ -155,2 +163,3 @@ raise ValueError('Unrecognized filetype')

def load_and_exit(filetype, fns_in, channel=1, t_srs=None, s_srs=None, o=None, *args, **kwargs):

@@ -196,2 +205,3 @@ """Load a list of files of a certain type, save them as StODeep mat files, exit

def _save(rd_list, outpath=None):

@@ -198,0 +208,0 @@ """Save a list of RadarData objects with optional output directory."""

@@ -36,16 +36,19 @@ #! /usr/bin/env python

sto_data.tnum = sto_mat['tnum'][0][0]
sto_data.trace_num = sto_mat['trace_num'][0]
sto_data.trig_level = sto_mat['trig_level'][0]
sto_data.pressure = sto_mat['pressure'][0]
sto_data.trace_num = np.squeeze(sto_mat['trace_num'])
sto_data.trig_level = np.squeeze(sto_mat['trig_level'])
sto_data.travel_time = sto_data.dt * 1.0e6 * np.arange(sto_data.snum)
sto_data.lat = sto_mat['lat'][0]
sto_data.long = sto_mat['long'][0]
sto_data.elev = sto_mat['elev'][0]
sto_data.decday = sto_mat['decday'][0]
sto_data.trace_int = sto_mat['trace_int'][0]
sto_data.dist = sto_mat['dist'][0]
sto_data.lat = np.squeeze(sto_mat['lat'])
sto_data.long = np.squeeze(sto_mat['long'])
sto_data.elev = np.squeeze(sto_mat['elev'])
sto_data.decday = np.squeeze(sto_mat['decday'])
sto_data.trace_int = np.squeeze(sto_mat['trace_int'])
sto_data.dist = np.squeeze(sto_mat['dist'])
# The pressure variable is deprecated and always gives issues
sto_data.pressure = np.squeeze(sto_mat['pressure'])
if len(sto_data.pressure) != sto_data.tnum:
sto_data.pressure = np.zeros(sto_data.tnum)
# Try the x/y coordinates but not neccesarry
try:
sto_data.x_coord = sto_mat['x_coord'][0]
sto_data.y_coord = sto_mat['y_coord'][0]
sto_data.x_coord = np.squeeze(sto_mat['x_coord'])
sto_data.y_coord = np.squeeze(sto_mat['y_coord'])
except:

@@ -74,4 +77,4 @@ Warning('No projected coordinate system (x,y).')

# TODO: Import the flags
sto_data.flags = RadarFlags()
"""
sto_data.flags = RadarFlags()
sto_flags = sto_mat['flags']

@@ -78,0 +81,0 @@ 'bpass'

@@ -14,2 +14,3 @@ /*

#include <stdlib.h>
#include <Python.h>

@@ -28,5 +29,4 @@ /* Migrate data into migdata */

setbuf(stdout, NULL);
printf("Iter: ");
PySys_WriteStdout("Beginning migration");
iter_start = clock();

@@ -37,5 +37,7 @@ /* You can flip the loop order, but I think this is marginally faster

if (j % 100 == 0){
printf("%d", j);
}else{
printf(".");
if (j > 0){
PySys_WriteStdout("trace %d", j);
}
}else if (j % 10 == 0){
PySys_WriteStdout(".");
}

@@ -101,7 +103,7 @@ for(i=0;i<snum;i++){

time_remaining = (tnum - j) * el_t / j;
printf("\nEst. time remaining: %f sec", time_remaining);
PySys_WriteStdout("\nEst. time remaining: %4.2f sec", time_remaining);
}
}
}
printf("\n");
PySys_WriteStdout("\n");
}

@@ -90,3 +90,3 @@ #! /usr/bin/env python

print('Kirchhoff Migration (diffraction summation) of %.0fx%.0f matrix' % (dat.tnum, dat.snum))
print('Kirchhoff Migration (diffraction summation) of %.0fx%.0f matrix' % (dat.snum, dat.tnum))
# check that the arrays are compatible

@@ -114,3 +114,3 @@ _check_data_shape(dat)

print('Kirchhoff Migration of %.0fx%.0f matrix complete in %.2f seconds'
% (dat.tnum, dat.snum, time.time() - start))
% (dat.snum, dat.tnum, time.time() - start))
return dat

@@ -138,3 +138,3 @@

print('Stolt Migration (f-k migration) of %.0fx%.0f matrix'%(dat.tnum,dat.snum))
print('Stolt Migration (f-k migration) of %.0fx%.0f matrix'%(dat.snum, dat.tnum))
# check that the arrays are compatible

@@ -207,3 +207,3 @@ _check_data_shape(dat)

print('Stolt Migration of %.0fx%.0f matrix complete in %.2f seconds'
%(dat.tnum,dat.snum,time.time()-start))
% (dat.snum, dat.tnum, time.time() - start))
return dat

@@ -247,3 +247,3 @@

print('Phase-Shift Migration of %.0fx%.0f matrix'%(dat.tnum,dat.snum))
print('Phase-Shift Migration of %.0fx%.0f matrix'%(dat.snum,dat.tnum))
# check that the arrays are compatible

@@ -288,3 +288,3 @@ _check_data_shape(dat)

print('Phase-Shift Migration of %.0fx%.0f matrix complete in %.2f seconds'
%(dat.tnum,dat.snum,time.time()-start))
% (dat.snum, dat.tnum, time.time() - start))
return dat

@@ -326,3 +326,3 @@

"""
print('Time-Wavenumber Migration of %.0fx%.0f matrix'%(dat.tnum,dat.snum))
print('Time-Wavenumber Migration of %.0fx%.0f matrix'%(dat.snum, dat.tnum))
# check that the arrays are compatible

@@ -358,3 +358,3 @@ _check_data_shape(dat)

print('Time-Wavenumber Migration of %.0fx%.0f matrix complete in %.2f seconds'
%(dat.tnum,dat.snum,time.time()-start))
% (dat.snum, dat.tnum, time.time() - start))
return dat

@@ -653,2 +653,2 @@

if np.size(dat.data, 1) != dat.tnum or np.size(dat.data, 0) != dat.snum:
raise ValueError('The input array must be of size (tnum,snum)')
raise ValueError('The input array must be of size (snum, tnum)')

@@ -371,4 +371,4 @@ #! /usr/bin/env python

return np.array([datetime.datetime(1970, 1, 1) +
datetime.timedelta(days=int(dd)) +
datetime.timedelta(days=int(dd)) +
datetime.timedelta(days=dd % 1)
for dd in self.decday], dtype=np.datetime64)

@@ -142,3 +142,3 @@ #! /usr/bin/env python

d = minimize(optimize_moveout_depth,.5*t*uice,args=(t,ant_sep,d_interp,u_interp),
tol=1e-8,bounds=((0, 0.5*t*uair),))['x'][0]
tol=1e-8,bounds=[(0,0.5*t*uair)])['x'][0]
u_rms = np.sqrt(np.mean(u_interp[d_interp<d]**2.))

@@ -145,0 +145,0 @@ # get the upper leg of the trave_path triangle (direct arrival) from the antenna separation and the rms velocity

@@ -60,3 +60,3 @@ #! /usr/bin/env python

self.data_dtype is not None) and (self.data_dtype != mat['data'].dtype):
# Be carefuly of obliterating NaNs
# Be careful of obliterating NaNs
# We will use singles instead of ints for this guess

@@ -63,0 +63,0 @@ if (self.data_dtype in [int, np.int8, np.int16]) and np.any(np.isnan(mat['data'])):

@@ -1,4 +0,4 @@

Metadata-Version: 1.0
Metadata-Version: 2.1
Name: impdar
Version: 1.1.4.post5
Version: 1.1.5
Summary: Scripts for impulse radar

@@ -9,3 +9,48 @@ Home-page: http://github.com/dlilien/impdar

License: GNU GPL-3.0
Description: UNKNOWN
Description: # ImpDAR: an impulse radar processor
[![DOI](https://zenodo.org/badge/134008583.svg)](https://zenodo.org/badge/latestdoi/134008583) [![Tests](https://github.com/dlilien/ImpDAR/actions/workflows/python-package.yml/badge.svg)](https://github.com/dlilien/ImpDAR/actions/workflows/python-package.yml) [![codecov](https://codecov.io/gh/dlilien/ImpDAR/branch/main/graph/badge.svg?token=R51QB61XNP)](https://codecov.io/gh/dlilien/ImpDAR)
ImpDAR is a suite of processing and interpretation tools for impulse radar (targeted for ice-penetrating radar applications but usable for ground-penetrating radar as well). The core processing steps and general terminology come from the St. Olaf Deep Radar processor, but everything is re-written in python and significant improvements to speed, readability, documentation, and interface have been made across the board. However, this code has a lot of history of contributors--acknowledgments of many of them are preserved in the file headers.
ImpDAR is intended to be more flexible than other available options. Support is gradually being added for a variety of file formats. Currently, [GSSI](http://www.geophysical.com), [PulseEKKO](http://www.sensoft.ca), [Ramac](http://www.malagpr.com), [Blue Systems](http://www.bluesystem.ca/ice-penetrating-radar.html), DELORES, SEGY, [gprMAX](http://www.gprmax.com), Gecko, and legacy StoDeep files are supported. Available processing steps include various filtering operations, trivial modifications such as restacking, cropping, or reversing data, and a few different geolocation-related operations like interpolating to constant trace spacing. The integrated migration routines are in development but Stolt is working.
The primary interface to ImpDAR is through the command line, which allows efficient processing of large volumes of data. An API, centered around the RadarData class, is also available to allow the user to use ImpDAR in other programs.
In addition to processing, ImpDAR can also be used for interpreting the radargrams (i.e. picking layers). Picking is generally an interactive process, and there is a GUI for doing the picking; the GUI requires PyQt5, which may be annoying as a source build but is easy to install with Anaconda. The GUI also allows for basic processing steps, with the updates happening to the plot in real time. However, we have not added an 'undo' button to the GUI, so you are stuck with going back to your last saved version if you do not like the interactive results.
## Documentation
Documentation of the various processing steps is [here](https://impdar.readthedocs.io/en/latest/). There are examples of basic processing and plotting, and a longer example showing migration.
## Installation
Easiest is `pip install impdar`. Some explanation of other options is available in the main documentation, but PyPi will be updated with each release.
### Dependencies
#### Required
*Python 3* The package is tested on Python 3.7+. Older versions may work, but we have stopped testing on 2.7 since it has reached end of life. You can probably get 2.7 to work still, but no guarantees.
You also need:
[numpy](http://numpy.org),
[scipy](http://www.scipy.org),
[matplotlib](http://matplotlib.org),
[SegYIO](https://github.com/equinor/segyio/) for SEGY support and for SeisUnix migration, and
[h5py](https://h5py.org) is needed for some data formats.
I recommend just using Anaconda for your install, since it will also get you PyQt and therefore enable the GUI.
#### Recommended
[GDAL](http://gdal.org) is needed to reproject out of WGS84, and thus for proper distance measurement. Otherwise, distance calculations are going to be moderately or severely incorrect.
[PyQt5](https://pypi.org/project/PyQt5/) is needed to run the GUI, which is needed for picking. You can do everything from the command line, and plot the results with matplotlib, without PyQt5.
Depending on whether you need migration routines, there may be some external dependencies. ImpDAR is designed to interface with [SeisUnix](https://github.com/JohnWStockwellJr/SeisUnix), which contains a number of powerful migration routines. You need to install SeisUnix yourself and get it on your path. If you are running windows, you need to figure out how to use Cygwin as well. However, the pure python migration routines in ImpDAR can work quite well, so don't let the difficulty of installing these compiled routines stop you from using those. ImpDAR searches for SeisUnix at the time of the call to the migration routine, so you can always add this later if you find that you need it.
## Contributing
Support for different radar data types has primarily been added as needed--contributions for data readers for other systems, whether commercial or custom, are always welcome.
Platform: UNKNOWN
Description-Content-Type: text/markdown

@@ -30,3 +30,4 @@ #! /usr/bin/env python

'imppick=impdar.bin.imppick:main',
'impplot=impdar.bin.impplot:main']
'impplot=impdar.bin.impplot:main',
'apdar=impdar.bin.apdar:main']

@@ -43,3 +44,3 @@ ext = '.pyx' if CYTHON else '.c'

version = '1.1.4.post5'
version = '1.1.5'
packages = ['impdar',

@@ -73,2 +74,4 @@ 'impdar.lib',

packages=packages,
long_description=open('README.md', 'r').read(),
long_description_content_type='text/markdown',
test_suite='nose.collector')

@@ -87,2 +90,4 @@ except SystemExit:

packages=packages,
long_description=open('README.md', 'r').read(),
long_description_content_type='text/markdown',
test_suite='nose.collector')
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
"""
Load an output file processed from the University of Alabama VHF or UWB radars (2019).
Data are all processed, but some more complex data views are not available.
This allows easy colorbar adjustment, interpolation to geospatial coordinates, and layer picking.
"""
import numpy as np
from scipy.interpolate import interp1d
from ..RadarData import RadarData, RadarFlags
from ..gpslib import nmea_info
try:
import h5py
H5 = True
except ImportError:
H5 = False
def load_UoA_mat(fn_mat, gps_offset=0.0):
    """Load a processed output file from the University of Alabama VHF/UWB radars.

    The file is a MATLAB v7.3 (HDF5) .mat file, so h5py is required.

    Parameters
    ----------
    fn_mat: str
        The filename to load.
    gps_offset: float, optional
        Offset, in seconds, added to the INS/GPS POSIX timestamps before
        interpolating positions onto the radar trace times. Default 0.0.

    Returns
    -------
    RadarData
        The loaded radargram with geolocation and timing attributes filled in.

    Raises
    ------
    ImportError
        If h5py is not installed.
    """
    # Fail early with a clear message rather than a NameError deep in the
    # h5py.File call if the optional h5py dependency is missing.
    if not H5:
        raise ImportError('h5py is required to load UoA .mat (HDF5) files')

    UoA_data = RadarData(None)
    UoA_data.fn = fn_mat
    with h5py.File(fn_mat, 'r') as fin:
        UoA_data.data = fin['Data']['channel'][:, :].T
        if len(UoA_data.data.dtype) == 2:
            # Complex data stored as a compound (real, imag) dtype:
            # convert the magnitude to dB.
            UoA_data.data = 10 * np.log10(
                np.sqrt(UoA_data.data['real'] ** 2.0 + UoA_data.data['imag'] ** 2.0))
        else:
            # Real-valued (power) data: just convert to dB.
            UoA_data.data = 10 * np.log10(UoA_data.data)
        UoA_data.snum, UoA_data.tnum = int(UoA_data.data.shape[0]), int(UoA_data.data.shape[1])
        UoA_data.trace_num = np.arange(UoA_data.tnum) + 1
        # Scale by 1e6: travel_time is kept in microseconds, dt in seconds
        # (presumably fast_time is stored in seconds -- TODO confirm).
        UoA_data.travel_time = fin['Data']['fast_time'][:].flatten() * 1.0e6
        UoA_data.dt = np.mean(np.diff(UoA_data.travel_time)) * 1.0e-6

        # INS/GPS info, then interpolate positions onto the trace timestamps.
        nminfo = nmea_info()
        nminfo.time = (fin['INS_GPS']['POSIX_time'][:].flatten() + gps_offset) / (24. * 60. * 60.)  # Stored in seconds
        nminfo.ppstime = fin['INS_GPS']['POSIX_time'][:].flatten() + gps_offset
        nminfo.lat = fin['INS_GPS']['latitude'][:].flatten()
        nminfo.lon = fin['INS_GPS']['longitude'][:].flatten()
        nminfo.elev = fin['INS_GPS']['altitude_MSL'][:].flatten()

        # Read the per-trace times once instead of four times.
        trace_time = fin['Data']['POSIX_time'][:].flatten()
        UoA_data.lat = interp1d(nminfo.ppstime, nminfo.lat, fill_value='extrapolate')(trace_time)
        UoA_data.long = interp1d(nminfo.ppstime, nminfo.lon, fill_value='extrapolate')(trace_time)
        UoA_data.elev = interp1d(nminfo.ppstime, nminfo.elev, fill_value='extrapolate')(trace_time)
        UoA_data.decday = interp1d(nminfo.ppstime, nminfo.time, fill_value='extrapolate')(trace_time)

    try:
        UoA_data.get_projected_coords()
    except ImportError:
        # Projection is best-effort; presumably this needs an optional
        # reprojection dependency (e.g. GDAL) -- fall back to lat/long only.
        pass

    UoA_data.trace_int = UoA_data.decday[1] - UoA_data.decday[0]
    UoA_data.pressure = np.zeros_like(UoA_data.decday)
    UoA_data.trig = np.zeros_like(UoA_data.decday).astype(int)
    UoA_data.trig_level = 0.
    UoA_data.flags = RadarFlags()
    UoA_data.flags.power = False

    # Guess the channel number from the filename; 999 marks a merged
    # "_files.mat" product, 10 is the fallback when no channel is encoded.
    if fn_mat[-10:] == '_files.mat':
        UoA_data.chan = 999
    else:
        if 'hannel' in fn_mat:
            idx = fn_mat.index('hannel')
            UoA_data.chan = int(fn_mat[idx + 6])
        elif 'Ch' in fn_mat:
            idx = fn_mat.index('Ch')
            UoA_data.chan = int(fn_mat[idx + 2])
        else:
            UoA_data.chan = 10
    UoA_data.check_attrs()
    return UoA_data

Sorry, the diff of this file is too big to display