Get Demo_DFFN conversions working after incorporating tips from:
ilyakava committed Jan 24, 2020
1 parent ccd1a52 commit 65a7594
Showing 3 changed files with 20 additions and 4 deletions.
kaffe/layers.py (17 additions, 3 deletions)
@@ -4,6 +4,8 @@
 
 from .shapes import *
 
+import pdb
+
 LAYER_DESCRIPTORS = {
 
     # Caffe Types
@@ -103,6 +105,7 @@ class LayerAdapter(object):
     def __init__(self, layer, kind):
         self.layer = layer
         self.kind = kind
+        self._input_shape = None
 
     @property
     def parameters(self):
@@ -114,7 +117,7 @@ def parameters(self):
         raise NodeDispatchError('Caffe parameters not found for layer kind: %s' % (self.kind))
 
     @staticmethod
-    def get_kernel_value(scalar, repeated, idx, default=None):
+    def get_kernel_value(scalar, repeated, idx, default=None, params=None):
         if scalar:
             return scalar
         if repeated:
@@ -127,15 +130,26 @@ def get_kernel_value(scalar, repeated, idx, default=None):
             # Extract the value for the given spatial dimension
             return repeated[idx]
         if default is None:
+            #pdb.set_trace()
             raise ValueError('Unable to determine kernel parameter!')
         return default
 
+    def set_input_shape(self, input_shape):
+        self._input_shape = input_shape
+
     @property
     def kernel_parameters(self):
         assert self.kind in (NodeKind.Convolution, NodeKind.Pooling)
         params = self.parameters
+        global_pool = hasattr(params, 'global_pooling')
+        if params.kernel_size:
+            k_h = self.get_kernel_value(params.kernel_h, params.kernel_size, 0)
+            k_w = self.get_kernel_value(params.kernel_w, params.kernel_size, 1)
+        elif self._input_shape:
+            k_h, k_w = [self._input_shape.height, self._input_shape.width]
+        else:  # no kernel size and no cached input shape: get_kernel_value raises ValueError
+            k_h = self.get_kernel_value(params.kernel_h, params.kernel_size, 0)
+            k_w = self.get_kernel_value(params.kernel_w, params.kernel_size, 1)
         s_h = self.get_kernel_value(params.stride_h, params.stride, 0, default=1)
         s_w = self.get_kernel_value(params.stride_w, params.stride, 1, default=1)
         p_h = self.get_kernel_value(params.pad_h, params.pad, 0, default=0)
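For context: the Demo_DFFN model appears to declare pooling layers with `global_pooling: true` and no `kernel_size`, so the converter must infer the kernel from the layer's input extent. A minimal, runnable sketch of the new resolution order; `FakeParams` and `FakeShape` are illustrative stand-ins (not from the repo) for the Caffe protobuf message and kaffe's TensorShape:

    from collections import namedtuple

    # Stand-in for kaffe's TensorShape (assumed fields: height, width).
    FakeShape = namedtuple('FakeShape', ['batch_size', 'channels', 'height', 'width'])

    class FakeParams(object):
        # Caffe allows kernel_size to be omitted when global_pooling is set.
        kernel_size = []
        kernel_h = kernel_w = 0
        global_pooling = True

    def resolve_kernel(params, input_shape):
        # Mirrors the branch order added to LayerAdapter.kernel_parameters:
        # an explicit kernel_size wins; otherwise a cached input shape implies
        # a global pool over the whole feature map; otherwise it is an error.
        if params.kernel_size:
            return params.kernel_size[0], params.kernel_size[-1]
        elif input_shape:
            return input_shape.height, input_shape.width
        raise ValueError('Unable to determine kernel parameter!')

    print(resolve_kernel(FakeParams(), FakeShape(1, 64, 7, 7)))  # -> (7, 7)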
kaffe/shapes.py (1 addition, 0 deletions)
@@ -15,6 +15,7 @@ def get_filter_output_shape(i_h, i_w, params, round_func):
 def get_strided_kernel_output_shape(node, round_func):
     assert node.layer is not None
     input_shape = node.get_only_parent().output_shape
+    node.layer.set_input_shape(input_shape)
     o_h, o_w = get_filter_output_shape(input_shape.height, input_shape.width,
                                        node.layer.kernel_parameters, round_func)
     params = node.layer.parameters
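This one-line hook is what feeds the fallback in layers.py: shape inference caches the parent's output shape on the adapter immediately before `kernel_parameters` is evaluated on the next line. A small runnable sketch of that ordering contract (`AdapterSketch` is a toy stand-in for LayerAdapter, assuming the behavior above):

    class AdapterSketch(object):
        def __init__(self):
            self._input_shape = None

        def set_input_shape(self, input_shape):
            self._input_shape = input_shape

        @property
        def kernel_parameters(self):
            # Without a cached input shape, a global-pooling layer has nothing
            # to fall back on and kernel lookup fails.
            if self._input_shape is None:
                raise ValueError('Unable to determine kernel parameter!')
            return self._input_shape  # (k_h, k_w) for a global pool

    adapter = AdapterSketch()
    adapter.set_input_shape((7, 7))   # done by get_strided_kernel_output_shape
    print(adapter.kernel_parameters)  # -> (7, 7)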
kaffe/tensorflow/transformer.py (2 additions, 1 deletion)
@@ -146,7 +146,8 @@ def map_dropout(self, node):
         return TensorFlowNode('dropout', node.parameters.dropout_ratio)
 
     def map_batch_norm(self, node):
-        scale_offset = len(node.data) == 4
+        scale_offset = len(node.output_shape) == 4
+        # scale_offset = len(node.data) == 4
         kwargs = {} if scale_offset else {'scale_offset': False}
         return MaybeActivated(node, default=False)('batch_normalization', **kwargs)
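A note on this change, assuming upstream caffe-tensorflow conventions apply here (the commit itself does not explain it): `node.data` on a batch-norm node holds the injected blobs, typically [mean, variance] plus [gamma, beta] once a following Scale layer has been fused in, so `len(node.data) == 4` meant "emit scale and offset variables". The replacement instead tests whether the output tensor is 4-D, a different predicate that reads like a model-specific workaround for nodes whose blobs were not populated. A mocked contrast of the two checks (`FakeNode` is illustrative only):

    class FakeNode(object):
        # Mocked fields only; a real kaffe node carries much more.
        def __init__(self, data, output_shape):
            self.data = data
            self.output_shape = output_shape

    # Unfused batch norm: only mean and variance blobs, but a 4-D NCHW output.
    node = FakeNode(data=['mean', 'variance'], output_shape=(1, 64, 56, 56))

    old_check = len(node.data) == 4          # False: no gamma/beta fused in
    new_check = len(node.output_shape) == 4  # True: any 4-D output now implies scale/offset

    print(old_check, new_check)  # -> False True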
