Skip to content

Commit

Permalink
Bug fixes in Frogfish release
Browse files Browse the repository at this point in the history
  • Loading branch information
dmitriykovalev committed Feb 5, 2021
1 parent 276d0d6 commit d4b9f57
Show file tree
Hide file tree
Showing 8 changed files with 140 additions and 60 deletions.
6 changes: 6 additions & 0 deletions debian/changelog
Original file line number Diff line number Diff line change
@@ -1,3 +1,9 @@
coral (1.0.1) stable; urgency=medium

* pycoral initial release update

-- Coral <[email protected]> Thu, 04 Feb 2021 14:37:49 -0800

coral (1.0) stable; urgency=medium

* pycoral initial release
Expand Down
11 changes: 8 additions & 3 deletions docs/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -6,13 +6,18 @@ reference with Sphinx.
You can build the reference docs as follows:

```
# We require Python3, so if that's not your default, first start a virtual environment:
# To ensure consistent results, use a Python virtual environment:
python3 -m venv ~/.my_venvs/coraldocs
source ~/.my_venvs/coraldocs/bin/activate
# Navigate to the pycoral/docs/ directory and run these commands...
# Navigate to the pycoral/ dir and build the pybind APIs:
cd pycoral
bash scripts/build.sh
# OR, build for only the python version and architecture you need. Eg:
# DOCKER_CPUS=k8 scripts/build.sh --python_versions 38
# Install the doc build dependencies:
# Navigate to the pycoral/docs/ dir and install the doc dependencies:
cd docs
pip install -r requirements.txt
# Build the docs:
Expand Down
3 changes: 2 additions & 1 deletion docs/pycoral.utils.rst
Original file line number Diff line number Diff line change
Expand Up @@ -16,4 +16,5 @@ pycoral.utils.edgetpu
.. automodule:: pycoral.utils.edgetpu
:members:
:undoc-members:
:inherited-members:
:inherited-members:
:imported-members:
2 changes: 1 addition & 1 deletion pycoral/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,4 +14,4 @@
# limitations under the License.
"""Version information for Coral Python APIs."""

__version__ = "1.0.0"
__version__ = "1.0.1"
2 changes: 1 addition & 1 deletion pycoral/utils/edgetpu.py
Original file line number Diff line number Diff line change
Expand Up @@ -136,7 +136,7 @@ def _gst_buffer_map(buffer):


def _check_input_size(input_size, expected_input_size):
if input_size != expected_input_size:
if input_size < expected_input_size:
raise ValueError('input size={}, expected={}.'.format(
input_size, expected_input_size))

Expand Down
147 changes: 98 additions & 49 deletions src/coral_wrapper.cc
Original file line number Diff line number Diff line change
Expand Up @@ -207,56 +207,105 @@ PYBIND11_MODULE(_pywrap_coral, m) {
// Unlike import_array(), import_array1() has a return value.
// https://docs.scipy.org/doc/numpy-1.14.2/reference/c-api.array.html
import_array1();
py::options options;
options.disable_function_signatures();

m.def("InvokeWithMemBuffer",
[](py::object interpreter_handle, intptr_t buffer, size_t size) {
auto* interpreter = reinterpret_cast<tflite::Interpreter*>(
interpreter_handle.cast<intptr_t>());
auto status = coral::InvokeWithMemBuffer(
interpreter, reinterpret_cast<void*>(buffer), size,
static_cast<tflite::StatefulErrorReporter*>(
interpreter->error_reporter()));
if (!status.ok())
throw std::runtime_error(std::string(status.message()));
});

m.def("InvokeWithBytes",
[](py::object interpreter_handle, py::bytes input_data) {
auto* interpreter = reinterpret_cast<tflite::Interpreter*>(
interpreter_handle.cast<intptr_t>());
char* buffer;
ssize_t length;
PyBytes_AsStringAndSize(input_data.ptr(), &buffer, &length);
auto status = coral::InvokeWithMemBuffer(
interpreter, buffer, static_cast<size_t>(length),
static_cast<tflite::StatefulErrorReporter*>(
interpreter->error_reporter()));
if (!status.ok())
throw std::runtime_error(std::string(status.message()));
});

m.def("InvokeWithDmaBuffer",
[](py::object interpreter_handle, int dma_fd, size_t size) {
auto* interpreter = reinterpret_cast<tflite::Interpreter*>(
interpreter_handle.cast<intptr_t>());
auto status = coral::InvokeWithDmaBuffer(
interpreter, dma_fd, size,
static_cast<tflite::StatefulErrorReporter*>(
interpreter->error_reporter()));
if (!status.ok())
throw std::runtime_error(std::string(status.message()));
});

m.def("SupportsDmabuf", [](py::object interpreter_handle) {
auto* interpreter = reinterpret_cast<tflite::Interpreter*>(
interpreter_handle.cast<intptr_t>());
auto* context = interpreter->primary_subgraph().context();
auto* edgetpu_context = static_cast<edgetpu::EdgeTpuContext*>(
context->GetExternalContext(context, kTfLiteEdgeTpuContext));
if (!edgetpu_context) return false;
auto device = edgetpu_context->GetDeviceEnumRecord();
return device.type == edgetpu::DeviceType::kApexPci;
});
m.def(
"InvokeWithMemBuffer",
[](py::object interpreter_handle, uintptr_t buffer, size_t size) {
auto* interpreter = reinterpret_cast<tflite::Interpreter*>(
interpreter_handle.cast<intptr_t>());
py::gil_scoped_release release;
auto status = coral::InvokeWithMemBuffer(
interpreter, reinterpret_cast<void*>(buffer), size,
static_cast<tflite::StatefulErrorReporter*>(
interpreter->error_reporter()));
if (!status.ok())
throw std::runtime_error(std::string(status.message()));
},
R"pbdoc(
Invoke the given ``tflite.Interpreter`` with a pointer to a native
memory allocation.
Works only for Edge TPU models running on PCIe TPU devices.
Args:
interpreter: The ``tflite:Interpreter`` to invoke.
buffer (intptr_t): Pointer to memory buffer with input data.
size (size_t): The buffer size.
)pbdoc");

m.def(
"InvokeWithBytes",
[](py::object interpreter_handle, py::bytes input_data) {
auto* interpreter = reinterpret_cast<tflite::Interpreter*>(
interpreter_handle.cast<intptr_t>());
char* buffer;
ssize_t length;
PyBytes_AsStringAndSize(input_data.ptr(), &buffer, &length);
py::gil_scoped_release release;
auto status = coral::InvokeWithMemBuffer(
interpreter, buffer, static_cast<size_t>(length),
static_cast<tflite::StatefulErrorReporter*>(
interpreter->error_reporter()));
if (!status.ok())
throw std::runtime_error(std::string(status.message()));
},
R"pbdoc(
Invoke the given ``tflite.Interpreter`` with bytes as input.
Args:
interpreter: The ``tflite:Interpreter`` to invoke.
input_data (bytes): Raw bytes as input data.
)pbdoc");

m.def(
"InvokeWithDmaBuffer",
[](py::object interpreter_handle, int dma_fd, size_t size) {
auto* interpreter = reinterpret_cast<tflite::Interpreter*>(
interpreter_handle.cast<intptr_t>());
py::gil_scoped_release release;
auto status = coral::InvokeWithDmaBuffer(
interpreter, dma_fd, size,
static_cast<tflite::StatefulErrorReporter*>(
interpreter->error_reporter()));
if (!status.ok())
throw std::runtime_error(std::string(status.message()));
},
R"pbdoc(
Invoke the given ``tflite.Interpreter`` using a given Linux dma-buf
file descriptor as an input tensor.
Works only for Edge TPU models running on PCIe-based Coral devices.
You can verify device support with ``supports_dmabuf()``.
Args:
interpreter: The ``tflite:Interpreter`` to invoke.
dma_fd (int): DMA file descriptor.
size (size_t): DMA buffer size.
)pbdoc");

m.def(
"SupportsDmabuf",
[](py::object interpreter_handle) {
auto* interpreter = reinterpret_cast<tflite::Interpreter*>(
interpreter_handle.cast<intptr_t>());
auto* context = interpreter->primary_subgraph().context();
auto* edgetpu_context = static_cast<edgetpu::EdgeTpuContext*>(
context->GetExternalContext(context, kTfLiteEdgeTpuContext));
if (!edgetpu_context) return false;
auto device = edgetpu_context->GetDeviceEnumRecord();
return device.type == edgetpu::DeviceType::kApexPci;
},
R"pbdoc(
Checks whether the device supports Linux dma-buf.
Args:
interpreter: The ``tflite:Interpreter`` that's bound to the
Edge TPU you want to query.
Returns:
True if the device supports DMA buffers.
)pbdoc");

m.def("GetRuntimeVersion", &GetRuntimeVersion,
R"pbdoc(
Expand Down
27 changes: 23 additions & 4 deletions tests/edgetpu_utils_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -116,7 +116,6 @@ def _run_inference_with_different_input_types(self, interpreter, input_data):
edgetpu.run_inference(interpreter, np_input)
ret = interpreter.tensor(output_index)()
ret0 = np.copy(ret)
self.assertTrue(np.array_equal(ret0, ret))
# bytes
bytes_input = bytes(input_data)
edgetpu.run_inference(interpreter, bytes_input)
Expand All @@ -131,10 +130,19 @@ def _run_inference_with_different_input_types(self, interpreter, input_data):
if _libgst:
gst_input = Gst.Buffer.new_wrapped(bytes_input)
edgetpu.run_inference(interpreter, gst_input)
ret = interpreter.tensor(output_index)()
self.assertTrue(np.array_equal(ret0, ret))
else:
print('Can not import gi. Skip test on Gst.Buffer input type.')

def _run_inference_with_gst(self, interpreter, input_data):
  """Runs a single inference feeding `input_data` wrapped in a Gst.Buffer.

  Args:
    interpreter: the tflite Interpreter to invoke via edgetpu.run_inference.
    input_data: bytes-like raw input for the model's input tensor.

  Returns:
    A copy (np.ndarray) of the first output tensor.
  """
  output_index = interpreter.get_output_details()[0]['index']
  bytes_input = bytes(input_data)
  gst_input = Gst.Buffer.new_wrapped(bytes_input)
  edgetpu.run_inference(interpreter, gst_input)
  ret = interpreter.tensor(output_index)()
  # Copy the result: tensor() presumably returns a view into interpreter
  # memory that a later invocation would overwrite — TODO confirm.
  return np.copy(ret)

def test_run_inference_with_different_types(self):
interpreter = edgetpu.make_interpreter(self._default_test_model_path())
interpreter.allocate_tensors()
Expand All @@ -147,9 +155,20 @@ def test_run_inference_larger_input_size(self):
interpreter.allocate_tensors()
input_size = required_input_array_size(interpreter)
input_data = test_utils.generate_random_input(1, input_size + 1)
with self.assertRaisesRegex(ValueError,
'input size=150529, expected=150528'):
self._run_inference_with_different_input_types(interpreter, input_data)
self._run_inference_with_different_input_types(interpreter, input_data)

def test_compare_expected_and_larger_input_size(self):
  # Checks that an input buffer one element larger than the model's required
  # input size produces the same output as the exactly-sized prefix of the
  # same data (i.e. the extra trailing byte is ignored).
  # Runs only when gi/GStreamer is importable, since the helper feeds the
  # data through a Gst.Buffer.
  if _libgst:
    interpreter = edgetpu.make_interpreter(self._default_test_model_path())
    interpreter.allocate_tensors()
    input_size = required_input_array_size(interpreter)
    larger_input_data = test_utils.generate_random_input(1, input_size + 1)
    larger_ret = self._run_inference_with_gst(interpreter, larger_input_data)
    ret = self._run_inference_with_gst(interpreter,
                                       larger_input_data[:input_size])
    self.assertTrue(np.array_equal(ret, larger_ret))
  else:
    print('Can not import gi. Skip test on Gst.Buffer input type.')

def test_run_inference_smaller_input_size(self):
interpreter = edgetpu.make_interpreter(self._default_test_model_path())
Expand Down

0 comments on commit d4b9f57

Please sign in to comment.