Fix bug in v1 estimators that was preventing use of microbatches.
PiperOrigin-RevId: 560149248
schien1729 authored and tensorflower-gardener committed Aug 26, 2023
1 parent 6248be8 commit e8bd0ba
Showing 3 changed files with 51 additions and 21 deletions.
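For context, the sketch below shows the kind of usage this commit unblocks: a v1 DP estimator trained with more than one microbatch. Only the dnn.DNNClassifier and DPGradientDescentGaussianOptimizer entry points come from this repository; the input_fn, feature column, and hyperparameter values are illustrative assumptions rather than code from the changed files.

import tensorflow as tf

from tensorflow_privacy.privacy.estimators.v1 import dnn
from tensorflow_privacy.privacy.optimizers.dp_optimizer import DPGradientDescentGaussianOptimizer


def input_fn():
  # Hypothetical input pipeline: 32 examples, batched so that each step's
  # batch of 16 is divisible by num_microbatches (4).
  features = {'x': tf.random.normal([32, 4])}
  labels = tf.zeros([32], dtype=tf.int32)
  return tf.data.Dataset.from_tensor_slices((features, labels)).batch(16)


optimizer = DPGradientDescentGaussianOptimizer(
    learning_rate=0.5,
    l2_norm_clip=1.0,
    noise_multiplier=0.0,
    num_microbatches=4,  # Values > 1 are what the fixed code path enables.
)

classifier = dnn.DNNClassifier(
    hidden_units=[10],
    feature_columns=[tf.feature_column.numeric_column('x', shape=(4,))],
    n_classes=2,
    optimizer=optimizer,
)
classifier.train(input_fn=input_fn, steps=1)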
15 changes: 10 additions & 5 deletions tensorflow_privacy/privacy/estimators/v1/dnn_test.py
@@ -20,16 +20,20 @@
 from tensorflow_privacy.privacy.estimators.v1 import dnn
 from tensorflow_privacy.privacy.optimizers.dp_optimizer import DPGradientDescentGaussianOptimizer

+# pylint: disable=g-deprecated-tf-checker
+

 class DPDNNClassifierTest(tf.test.TestCase, parameterized.TestCase):
   """Tests for DP-enabled DNNClassifier."""

   @parameterized.named_parameters(
-      ('BinaryClassDNN', 2),
-      ('MultiClassDNN 3', 3),
-      ('MultiClassDNN 4', 4),
+      ('BinaryClassDNN', 2, 1),
+      ('BinaryClassDNN 4', 2, 4),
+      ('MultiClassDNN 3', 3, 1),
+      ('MultiClassDNN 4', 4, 1),
+      ('MultiClassDNN 4 4', 4, 4),
   )
-  def testDNN(self, n_classes):
+  def testDNN(self, n_classes, num_microbatches):
     train_features, train_labels = test_utils.make_input_data(256, n_classes)
     feature_columns = []
     for key in train_features:
@@ -40,7 +44,8 @@ def testDNN(self, n_classes):
         learning_rate=0.5,
         l2_norm_clip=1.0,
         noise_multiplier=0.0,
-        num_microbatches=1)
+        num_microbatches=num_microbatches,
+    )

     classifier = dnn.DNNClassifier(
         hidden_units=[10],
43 changes: 32 additions & 11 deletions tensorflow_privacy/privacy/estimators/v1/head.py
@@ -16,13 +16,15 @@
 import tensorflow as tf

 from tensorflow.python.ops import lookup_ops  # pylint: disable=g-direct-tensorflow-import
+# pylint: disable=g-deprecated-tf-checker
 from tensorflow_estimator.python.estimator import model_fn
 from tensorflow_estimator.python.estimator.canned import head as head_lib
 from tensorflow_estimator.python.estimator.canned import metric_keys
 from tensorflow_estimator.python.estimator.canned import prediction_keys
 from tensorflow_estimator.python.estimator.export import export_output
 from tensorflow_estimator.python.estimator.mode_keys import ModeKeys

+
 # Collect together all protected access items needed from base head.
 # pylint: disable=protected-access
 _DEFAULT_SERVING_KEY = head_lib._DEFAULT_SERVING_KEY
@@ -39,8 +41,12 @@
 _summary_key = head_lib._summary_key
 _validate_loss_fn_args = head_lib._validate_loss_fn_args

-_BaseBinaryLogisticHeadWithSigmoidCrossEntropyLoss = head_lib._BinaryLogisticHeadWithSigmoidCrossEntropyLoss
-_BaseMultiClassHeadWithSoftmaxCrossEntropyLoss = head_lib._MultiClassHeadWithSoftmaxCrossEntropyLoss
+_BaseBinaryLogisticHeadWithSigmoidCrossEntropyLoss = (
+    head_lib._BinaryLogisticHeadWithSigmoidCrossEntropyLoss
+)
+_BaseMultiClassHeadWithSoftmaxCrossEntropyLoss = (
+    head_lib._MultiClassHeadWithSoftmaxCrossEntropyLoss
+)
 # pylint: enable=protected-access


@@ -146,25 +152,33 @@ def _create_tpu_estimator_spec(self,
         classifier_output = _classification_output(
             scores=probabilities,
             n_classes=self._n_classes,
-            label_vocabulary=self._label_vocabulary)
+            label_vocabulary=self._label_vocabulary,
+        )
         return model_fn._TPUEstimatorSpec(  # pylint: disable=protected-access
             mode=ModeKeys.PREDICT,
             predictions=predictions,
             export_outputs={
                 _DEFAULT_SERVING_KEY: classifier_output,
                 _CLASSIFY_SERVING_KEY: classifier_output,
-                _PREDICT_SERVING_KEY: export_output.PredictOutput(predictions)
-            })
+                _PREDICT_SERVING_KEY: export_output.PredictOutput(predictions),
+            },
+        )

       training_loss, unreduced_loss, weights, label_ids = self.create_loss(
-          features=features, mode=mode, logits=logits, labels=labels)
+          features=features, mode=mode, logits=logits, labels=labels
+      )
       if regularization_losses:
         regularization_loss = tf.math.add_n(regularization_losses)
         regularized_training_loss = tf.math.add_n(
-            [training_loss, regularization_loss])
+            [training_loss, regularization_loss]
+        )
+        unreduced_regularized_training_loss = tf.math.add(
+            unreduced_loss, regularization_loss
+        )
       else:
         regularization_loss = None
         regularized_training_loss = training_loss
+        unreduced_regularized_training_loss = unreduced_loss

       if self._loss_reduction == tf.compat.v1.losses.Reduction.NONE:
         scalar_loss = tf.reduce_mean(regularized_training_loss)
@@ -191,8 +205,10 @@ def _create_tpu_estimator_spec(self,
         if train_op_fn is not None:
           raise ValueError('train_op_fn and optimizer cannot both be set.')
         train_op = optimizer.minimize(
-            regularized_training_loss,
-            global_step=tf.compat.v1.train.get_global_step())
+            # regularized_training_loss,
+            unreduced_regularized_training_loss,
+            global_step=tf.compat.v1.train.get_global_step(),
+        )
       elif train_op_fn is not None:
         train_op = train_op_fn(regularized_training_loss)
       else:
@@ -352,9 +368,13 @@ def _create_tpu_estimator_spec(self,
         regularization_loss = tf.math.add_n(regularization_losses)
         regularized_training_loss = tf.math.add_n(
             [training_loss, regularization_loss])
+        unreduced_regularized_training_loss = tf.math.add(
+            unreduced_loss, regularization_loss
+        )
       else:
         regularization_loss = None
         regularized_training_loss = training_loss
+        unreduced_regularized_training_loss = unreduced_loss

       if self._loss_reduction == tf.compat.v1.losses.Reduction.NONE:
         scalar_loss = tf.reduce_mean(regularized_training_loss)
@@ -382,8 +402,9 @@ def _create_tpu_estimator_spec(self,
         if train_op_fn is not None:
           raise ValueError('train_op_fn and optimizer cannot both be set.')
         train_op = optimizer.minimize(
-            regularized_training_loss,
-            global_step=tf.compat.v1.train.get_global_step())
+            unreduced_regularized_training_loss,
+            global_step=tf.compat.v1.train.get_global_step(),
+        )
       elif train_op_fn is not None:
         train_op = train_op_fn(regularized_training_loss)
       else:
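The functional change in head.py is which loss reaches optimizer.minimize: the DP optimizers clip gradients per microbatch and add noise, so they need the unreduced per-example loss vector rather than the already-reduced scalar, and handing them the scalar is what broke num_microbatches > 1 in these v1 heads. The helper below is a rough graph-mode sketch of that contract, not the tf-privacy implementation; its name and signature are invented for illustration.

import tensorflow as tf


def per_microbatch_clipped_grads(vector_loss, variables, num_microbatches,
                                 l2_norm_clip):
  # Regroup per-example losses into microbatches and average within each one.
  microbatch_losses = tf.reduce_mean(
      tf.reshape(vector_loss, [num_microbatches, -1]), axis=1)

  summed = [tf.zeros_like(v) for v in variables]
  for i in range(num_microbatches):
    # One gradient per microbatch, clipped to a bounded global norm.
    grads = tf.gradients(microbatch_losses[i], variables)
    grads, _ = tf.clip_by_global_norm(grads, l2_norm_clip)
    summed = [s + g for s, g in zip(summed, grads)]

  # A real DP optimizer would also add Gaussian noise scaled by
  # noise_multiplier * l2_norm_clip before averaging.
  return [s / num_microbatches for s in summed]

A scalar loss cannot be reshaped into [num_microbatches, -1], which is why both heads now pass unreduced_regularized_training_loss through to the optimizer.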
14 changes: 9 additions & 5 deletions tensorflow_privacy/privacy/estimators/v1/linear_test.py
@@ -21,18 +21,22 @@
 from tensorflow_privacy.privacy.estimators.v1 import linear
 from tensorflow_privacy.privacy.optimizers.dp_optimizer import DPGradientDescentGaussianOptimizer

+# pylint: disable=g-deprecated-tf-checker
+

 class DPLinearClassifierClassifierTest(
     tf.test.TestCase, parameterized.TestCase
 ):
   """Tests for DP-enabled LinearClassifier."""

   @parameterized.named_parameters(
-      ('BinaryClassLinear', 2),
-      ('MultiClassLinear 3', 3),
-      ('MultiClassLinear 4', 4),
+      ('BinaryClassLinear 1', 2, 1),
+      ('BinaryClassLinear 4', 2, 4),
+      ('MultiClassLinear 3', 3, 1),
+      ('MultiClassLinear 4', 4, 1),
+      ('MultiClassLinear 4 1', 4, 2),
   )
-  def testLinearClassifier(self, n_classes):
+  def testRunsWithoutErrors(self, n_classes, num_microbatches):
     train_features, train_labels = test_utils.make_input_data(256, n_classes)
     feature_columns = []
     for key in train_features:
@@ -43,7 +47,7 @@ def testLinearClassifier(self, n_classes):
         learning_rate=0.5,
         l2_norm_clip=1.0,
         noise_multiplier=0.0,
-        num_microbatches=1,
+        num_microbatches=num_microbatches,
     )

     classifier = linear.LinearClassifier(
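The updated tests sweep num_microbatches alongside n_classes. One practical constraint, stated here as an assumption about how the DP optimizers regroup the loss rather than something these tests spell out: the per-step batch size should be divisible by num_microbatches, because the per-example loss is reshaped into [num_microbatches, -1]. A toy illustration:

import numpy as np

per_example_loss = np.arange(8.0)  # 8 per-example losses in one batch
num_microbatches = 4               # must divide the batch size
microbatch_losses = per_example_loss.reshape(num_microbatches, -1).mean(axis=1)
print(microbatch_losses)           # [0.5 2.5 4.5 6.5]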
