Skip to content

Commit

Permalink
FIX: 8bit/16bit, big-endian/little-endian in nexrad reader (#231)
Browse files Browse the repository at this point in the history
* fix 8bit/16bit, big-endian/little-endian in nexrad reader
* only apply mask for 2 byte data
* add history.md entry
  • Loading branch information
kmuehlbauer authored Nov 2, 2024
1 parent 794c606 commit 1dce43b
Show file tree
Hide file tree
Showing 2 changed files with 15 additions and 8 deletions.
2 changes: 2 additions & 0 deletions docs/history.md
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,8 @@
## 0.8.0 (2024-10-28)

This is the first version which uses datatree directly from xarray. Thus, xarray is pinned to version >= 2024.10.0.

* FIX: Correctly handle 8bit/16bit, big-endian/little-endian in nexrad reader (PHI and ZDR) ({issue}`230`) by [@syedhamidali](https://github.com/syedhamidali), ({pull}`231`) by [@kmuehlbauer](https://github.com/kmuehlbauer).
* ENH: Refactoring all xradar backends to use `from_dict` datatree constructor. Test for `_get_required_root`, `_get_subgroup`, and `_get_radar_calibration` were also added ({pull}`221`) by [@aladinor](https://github.com/aladinor)
* ENH: Added pytests to the missing functions in the `test_xradar` and `test_iris` in order to increase codecov ({pull}`228`) by [@syedhamidali](https://github.com/syedhamidali).
* ENH: Updated Readme ({pull}`226`) by [@syedhamidali](https://github.com/syedhamidali).
Expand Down
21 changes: 13 additions & 8 deletions xradar/io/backends/nexrad_level2.py
Original file line number Diff line number Diff line change
Expand Up @@ -600,16 +600,15 @@ def get_data(self, sweep_number, moment=None):
ngates = moments[name]["ngates"]
word_size = moments[name]["word_size"]
data_offset = moments[name]["data_offset"]
ws = {8: 1, 16: 2}
width = ws[word_size]
width = {8: 1, 16: 2}[word_size]
data = []
self.rh.pos += data_offset
data.append(self._rh.read(ngates, width=width).view(f"uint{word_size}"))
data.append(self._rh.read(ngates, width=width).view(f">u{width}"))
while self.init_next_record() and self.record_number <= stop:
if self.record_number in intermediate_records:
continue
self.rh.pos += data_offset
data.append(self._rh.read(ngates, width=width).view(f"uint{word_size}"))
data.append(self._rh.read(ngates, width=width).view(f">u{width}"))
moments[name].update(data=data)

def get_data_header(self):
Expand Down Expand Up @@ -1247,9 +1246,9 @@ def __init__(self, datastore, name, var):
- len(datastore.ds["intermediate_records"])
)
nbins = max([v["ngates"] for k, v in datastore.ds["sweep_data"].items()])
self.dtype = np.dtype("uint8")
if name == "PHI":
self.dtype = np.dtype("uint16")
word_size = datastore.ds["sweep_data"][name]["word_size"]
width = {8: 1, 16: 2}[word_size]
self.dtype = np.dtype(f">u{width}")
self.shape = (nrays, nbins)

def _getitem(self, key):
Expand All @@ -1259,8 +1258,14 @@ def _getitem(self, key):
except KeyError:
self.datastore.root.get_data(self.group, self.name)
data = self.datastore.ds["sweep_data"][self.name]["data"]
if self.name == "PHI":
# see 3.2.4.17.6 Table XVII-I Data Moment Characteristics and Conversion for Data Names
word_size = self.datastore.ds["sweep_data"][self.name]["word_size"]
if self.name == "PHI" and word_size == 16:
# 10 bit mask, but only for 2 byte data
x = np.uint16(0x3FF)
elif self.name == "ZDR" and word_size == 16:
# 11 bit mask, but only for 2 byte data
x = np.uint16(0x7FF)
else:
x = np.uint8(0xFF)
if len(data[0]) < self.shape[1]:
Expand Down

0 comments on commit 1dce43b

Please sign in to comment.