
Python moving_averages.assign_moving_average Method: Code Examples

宇文良骏
2023-12-01

This article collects typical usage examples of the Python method tensorflow.python.training.moving_averages.assign_moving_average. If you have been wondering what exactly assign_moving_average does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore the containing module, tensorflow.python.training.moving_averages, for related utilities.

The following 20 code examples of moving_averages.assign_moving_average are ordered by popularity. Every example assumes the import "from tensorflow.python.training import moving_averages" or, equivalently, "from tensorflow.python.training.moving_averages import assign_moving_average".
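As background for the examples, here is a minimal, self-contained sketch of the call itself; the variable name, decay value, and session loop are illustrative assumptions, not taken from any project below. With zero_debias=False, assign_moving_average(variable, value, decay) returns an op that assigns variable = decay * variable + (1 - decay) * value; with zero_debias=True the estimate is additionally bias-corrected for its zero initialization.

import tensorflow as tf  # assumes TF 1.x graph mode
from tensorflow.python.training import moving_averages

# A non-trainable variable that will hold the exponential moving average.
ema_var = tf.get_variable("ema_var", shape=[], trainable=False,
                          initializer=tf.zeros_initializer())
observation = tf.constant(10.0)

# Op that assigns: ema_var = 0.9 * ema_var + 0.1 * observation
update_op = moving_averages.assign_moving_average(
    ema_var, observation, decay=0.9, zero_debias=False)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(3):
        sess.run(update_op)
    print(sess.run(ema_var))  # 2.71 after three updates (0 -> 1.0 -> 1.9 -> 2.71)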

Example 1: create_and_apply_batch_norm

def create_and_apply_batch_norm(self, inp, n_features, decay, tower_setup, scope_name="bn"):
  beta, gamma, moving_mean, moving_var = create_batch_norm_vars(n_features, tower_setup, scope_name)
  self.n_params += 2 * n_features
  if tower_setup.is_main_train_tower:
    assert tower_setup.is_training
  if tower_setup.is_training and not tower_setup.freeze_batchnorm:
    xn, batch_mean, batch_var = tf.nn.fused_batch_norm(inp, gamma, beta, epsilon=Layer.BATCH_NORM_EPSILON,
                                                       is_training=True)
    if tower_setup.is_main_train_tower:
      update_op1 = moving_averages.assign_moving_average(
          moving_mean, batch_mean, decay, zero_debias=False, name='mean_ema_op')
      update_op2 = moving_averages.assign_moving_average(
          moving_var, batch_var, decay, zero_debias=False, name='var_ema_op')
      self.update_ops.append(update_op1)
      self.update_ops.append(update_op2)
    return xn
  else:
    xn = tf.nn.batch_normalization(inp, moving_mean, moving_var, beta, gamma, Layer.BATCH_NORM_EPSILON)
    return xn

Source: tobiasfshr/MOTSFusion (21 lines)

Example 2: moving_average_update

def moving_average_update(x, value, momentum):
    """Compute the moving average of a variable.

    # Arguments
        x: A `Variable`.
        value: A tensor with the same shape as `x`.
        momentum: The moving average momentum.

    # Returns
        An operation to update the variable.
    """
    return moving_averages.assign_moving_average(
        x, value, momentum, zero_debias=True)


# LINEAR ALGEBRA

Source: Relph1119/GraphicDesignPatternByPython (18 lines)

Example 3: _adaptive_max_norm

def _adaptive_max_norm(norm, std_factor, decay, global_step, epsilon, name):
  """Find max_norm given norm and previous average."""
  with vs.variable_scope(name, "AdaptiveMaxNorm", [norm]):
    log_norm = math_ops.log(norm + epsilon)

    def moving_average(name, value, decay):
      moving_average_variable = vs.get_variable(
          name, shape=value.get_shape(), dtype=value.dtype,
          initializer=init_ops.zeros_initializer, trainable=False)
      return moving_averages.assign_moving_average(
          moving_average_variable, value, decay, zero_debias=False)

    # quicker adaptation at the beginning
    if global_step is not None:
      n = math_ops.to_float(global_step)
      decay = math_ops.minimum(decay, n / (n + 1.))

    # update averages
    mean = moving_average("mean", log_norm, decay)
    sq_mean = moving_average("sq_mean", math_ops.square(log_norm), decay)

    variance = sq_mean - math_ops.square(mean)
    std = math_ops.sqrt(math_ops.maximum(epsilon, variance))
    max_norms = math_ops.exp(mean + std_factor * std)
    return max_norms, mean

Source: tobegit3hub/deep_image_model (27 lines)

Example 4: batch_normalization

def batch_normalization(incoming, is_training, beta=0.0, gamma=1.0, epsilon=1e-5, decay=0.9):
  shape = incoming.get_shape()
  dimensions_num = len(shape)
  axis = list(range(dimensions_num - 1))
  with tf.variable_scope('batchnorm'):
    beta = tf.Variable(initial_value=tf.ones(shape=[shape[-1]]) * beta, name='beta')
    gamma = tf.Variable(initial_value=tf.ones(shape=[shape[-1]]) * gamma, name='gamma')
    moving_mean = tf.Variable(initial_value=tf.zeros(shape=shape[-1:]), trainable=False, name='moving_mean')
    moving_variance = tf.Variable(initial_value=tf.zeros(shape=shape[-1:]), trainable=False, name='moving_variance')

    def update_mean_var():
      mean, variance = tf.nn.moments(incoming, axis)
      update_moving_mean = moving_averages.assign_moving_average(moving_mean, mean, decay)
      update_moving_variance = moving_averages.assign_moving_average(moving_variance, variance, decay)
      with tf.control_dependencies([update_moving_mean, update_moving_variance]):
        return tf.identity(mean), tf.identity(variance)

    mean, var = tf.cond(is_training, update_mean_var, lambda: (moving_mean, moving_variance))
    inference = tf.nn.batch_normalization(incoming, mean, var, beta, gamma, epsilon)
    inference.set_shape(shape)
    return inference

Source: maxim5/time-series-machine-learning (25 lines)

Example 5: batch_normalization

def batch_normalization(input, trainable, name, **kwargs):
    input_shape = input.get_shape()
    shape = input_shape.as_list()[-1::]
    axis = list(range(len(input_shape) - 1))
    moving_mean = tf.get_variable(shape=shape, initializer=tf.zeros_initializer, trainable=trainable, name=name + "_mean")
    moving_variance = tf.get_variable(shape=shape, initializer=tf.ones_initializer, trainable=trainable, name=name + "_var")
    offset = tf.get_variable(shape=shape, initializer=tf.zeros_initializer, trainable=trainable, name=name + "_bias")
    scale = tf.get_variable(shape=shape, initializer=tf.ones_initializer, trainable=trainable, name=name + "_scale") if name != 'fc1' else None
    mean, variance = tf.nn.moments(input, axis)
    update_moving_mean = moving_averages.assign_moving_average(moving_mean, mean, BN_DECAY)
    update_moving_variance = moving_averages.assign_moving_average(moving_variance, variance, BN_DECAY)
    tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_mean)
    tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_variance)
    is_training = tf.convert_to_tensor(trainable, dtype='bool', name='is_training')
    mean, variance = control_flow_ops.cond(is_training,
                                           lambda: (mean, variance),
                                           lambda: (moving_mean, moving_variance))
    return tf.nn.batch_normalization(input, mean, variance, offset, scale, name=name, **kwargs)

Source: yangxue0827/MobileFaceNet_Tensorflow (22 lines)

Example 6: update_bn_ema

def update_bn_ema(xn, batch_mean, batch_var,
                  moving_mean, moving_var, decay, internal_update):
    update_op1 = moving_averages.assign_moving_average(
        moving_mean, batch_mean, decay, zero_debias=False,
        name='mean_ema_op')
    update_op2 = moving_averages.assign_moving_average(
        moving_var, batch_var, decay, zero_debias=False,
        name='var_ema_op')

    if internal_update:
        with tf.control_dependencies([update_op1, update_op2]):
            return tf.identity(xn, name='output')
    else:
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_op1)
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_op2)
        return tf.identity(xn, name='output')

Source: microsoft/petridishnn (18 lines)
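Several of the examples (6 here, and 12 and 13 below) register the EMA update ops in the tf.GraphKeys.UPDATE_OPS collection instead of forcing them through control dependencies on the output. As a reminder of how those collected ops actually get executed, here is a minimal sketch of the training-loop wiring; the predictions, labels, and optimizer are illustrative assumptions, not part of any example here.

# Hypothetical training setup: only the UPDATE_OPS wiring matters here.
loss = tf.reduce_mean(tf.square(predictions - labels))  # assumed tensors
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    # The moving-average assignments now run as a side effect of each step.
    train_op = optimizer.minimize(loss)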

Example 7: get_output_for

def get_output_for(self, input, phase='train', **kwargs):
    if phase == 'train':
        # Calculate the moments based on the individual batch.
        mean, variance = tf.nn.moments(input, self.axis, shift=self.moving_mean)
        # Update the moving_mean and moving_variance moments.
        update_moving_mean = moving_averages.assign_moving_average(
            self.moving_mean, mean, self.decay)
        update_moving_variance = moving_averages.assign_moving_average(
            self.moving_variance, variance, self.decay)
        # Make sure the updates are computed here.
        with tf.control_dependencies([update_moving_mean,
                                      update_moving_variance]):
            output = tf.nn.batch_normalization(
                input, mean, variance, self.beta, self.gamma, self.epsilon)
    else:
        output = tf.nn.batch_normalization(
            input, self.moving_mean, self.moving_variance, self.beta, self.gamma, self.epsilon)
    output.set_shape(self.input_shape)
    return output

Source: freelunchtheorem/Conditional_Density_Estimation (21 lines)

Example 8: batch_normalization

def batch_normalization(self, input, name):
    with tf.variable_scope(name):
        bn_input_shape = input.get_shape()
        moving_mean = tf.get_variable(name + '_mean', bn_input_shape[-1:], initializer=tf.zeros_initializer, trainable=False)
        moving_variance = tf.get_variable(name + '_variance', bn_input_shape[-1:], initializer=tf.ones_initializer, trainable=False)

        def mean_var_with_update():
            mean, variance = tf.nn.moments(input, list(range(len(bn_input_shape) - 1)), name=name + '_moments')
            with tf.control_dependencies([assign_moving_average(moving_mean, mean, self.conv_bn_decay),
                                          assign_moving_average(moving_variance, variance, self.conv_bn_decay)]):
                return tf.identity(mean), tf.identity(variance)

        # mean, variance = tf.cond(tf.cast(self.isTraining, tf.bool), mean_var_with_update, lambda: (moving_mean, moving_variance))
        mean, variance = tf.cond(tf.cast(True, tf.bool), mean_var_with_update, lambda: (moving_mean, moving_variance))
        beta = tf.get_variable(name + '_beta', bn_input_shape[-1:], initializer=tf.zeros_initializer)
        gamma = tf.get_variable(name + '_gamma', bn_input_shape[-1:], initializer=tf.ones_initializer)
        return tf.nn.batch_normalization(input, mean, variance, beta, gamma, self.conv_bn_epsilon, name + '_bn_opt')

# smooth_L1 algorithm

Source: lslcode/SSD_for_Tensorflow (18 lines)

Example 9: moving_average_update

def moving_average_update(x, value, momentum):
    """Compute the moving average of a variable.

    # Arguments
        x: A `Variable`.
        value: A tensor with the same shape as `x`.
        momentum: The moving average momentum.

    # Returns
        An operation to update the variable.
    """
    return moving_averages.assign_moving_average(
        x, value, momentum, zero_debias=False)


# LINEAR ALGEBRA

Source: sheffieldnlp/deepQuest (18 lines)

Example 10: vq_discrete_bottleneck

def vq_discrete_bottleneck(x, hparams):
  """Simple vector quantized discrete bottleneck."""
  tf.logging.info("Using EMA with beta = {}".format(hparams.beta))
  bottleneck_size = 2**hparams.bottleneck_bits
  x_shape = common_layers.shape_list(x)
  x = tf.reshape(x, [-1, hparams.hidden_size])
  x_means_hot, e_loss = vq_nearest_neighbor(
      x, hparams)
  means, ema_means, ema_count = (hparams.means, hparams.ema_means,
                                 hparams.ema_count)

  # Update the ema variables
  updated_ema_count = moving_averages.assign_moving_average(
      ema_count,
      tf.reduce_sum(x_means_hot, axis=0),
      hparams.decay,
      zero_debias=False)

  dw = tf.matmul(x_means_hot, x, transpose_a=True)
  updated_ema_means = moving_averages.assign_moving_average(
      ema_means, dw, hparams.decay, zero_debias=False)
  n = tf.reduce_sum(updated_ema_count, axis=-1, keepdims=True)
  updated_ema_count = ((updated_ema_count + hparams.epsilon) /
                       (n + bottleneck_size * hparams.epsilon) * n)
  # pylint: disable=g-no-augmented-assignment
  updated_ema_means = updated_ema_means / tf.expand_dims(
      updated_ema_count, axis=-1)
  # pylint: enable=g-no-augmented-assignment
  with tf.control_dependencies([e_loss]):
    update_means = tf.assign(means, updated_ema_means)
    with tf.control_dependencies([update_means]):
      loss = hparams.beta * e_loss

  discrete = tf.reshape(x_means_hot, x_shape[:-1] + [bottleneck_size])
  return discrete, loss

Source: akzaidi/fine-lm (38 lines)

Example 11: vq_discrete_bottleneck

def vq_discrete_bottleneck(x,
                           bottleneck_bits,
                           beta=0.25,
                           decay=0.999,
                           epsilon=1e-5,
                           soft_em=False,
                           num_samples=10):
  """Simple vector quantized discrete bottleneck."""
  bottleneck_size = 2**bottleneck_bits
  x_shape = common_layers.shape_list(x)
  hidden_size = x_shape[-1]
  means, ema_means, ema_count = get_vq_bottleneck(bottleneck_size, hidden_size)
  x = tf.reshape(x, [-1, hidden_size])
  x_means_hot, e_loss = vq_nearest_neighbor(
      x, means, soft_em=soft_em, num_samples=num_samples)

  # Update the ema variables
  updated_ema_count = moving_averages.assign_moving_average(
      ema_count,
      tf.reduce_sum(
          tf.reshape(x_means_hot, shape=[-1, bottleneck_size]), axis=0),
      decay,
      zero_debias=False)

  dw = tf.matmul(x_means_hot, x, transpose_a=True)
  updated_ema_means = tf.identity(moving_averages.assign_moving_average(
      ema_means, dw, decay, zero_debias=False))
  n = tf.reduce_sum(updated_ema_count, axis=-1, keepdims=True)
  updated_ema_count = (
      (updated_ema_count + epsilon) / (n + bottleneck_size * epsilon) * n)
  updated_ema_means /= tf.expand_dims(updated_ema_count, axis=-1)

  with tf.control_dependencies([e_loss]):
    update_means = means.assign(updated_ema_means)
    with tf.control_dependencies([update_means]):
      loss = beta * e_loss

  d = tf.reshape(x_means_hot, x_shape[:-1] + [bottleneck_size])
  return d, loss

Source: akzaidi/fine-lm (40 lines)
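Examples 10 and 11 are the exponential-moving-average codebook update used in VQ-VAE-style models: assign_moving_average keeps a decayed count of how many input vectors were assigned to each codebook entry (ema_count) and a decayed sum of those vectors (ema_means); dividing the Laplace-smoothed sum by the count then yields the refreshed codebook entries written into means.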

Example 12: batchNormalization

def batchNormalization(x, is_training, decay=0.9, epsilon=0.001, inference_only=False):
  x_shape = x.get_shape()
  params_shape = x_shape[-1:]
  axis = list(range(len(x_shape) - 1))
  beta = _get_variable('beta',
                       params_shape,
                       initializer=tf.zeros_initializer)
  gamma = _get_variable('gamma',
                        params_shape,
                        initializer=tf.ones_initializer)
  moving_mean = _get_variable('moving_mean',
                              params_shape,
                              initializer=tf.zeros_initializer,
                              trainable=False)
  moving_variance = _get_variable('moving_variance',
                                  params_shape,
                                  initializer=tf.ones_initializer,
                                  trainable=False)

  # These ops will only be performed when training.
  mean, variance = tf.nn.moments(x, axis)
  update_moving_mean = moving_averages.assign_moving_average(moving_mean,
                                                             mean, decay)
  update_moving_variance = moving_averages.assign_moving_average(
      moving_variance, variance, decay)
  tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_moving_mean)
  tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_moving_variance)

  return tf.cond(is_training,
                 lambda: tf.nn.batch_normalization(x, mean, variance, beta, gamma, epsilon),
                 lambda: tf.nn.batch_normalization(x, moving_mean, moving_variance, beta, gamma, epsilon))
  # return tf.contrib.layers.batch_norm(x, decay=decay, epsilon=epsilon, is_training=is_training)

# Flatten Layer

Source: arashno/tensorflow_multigpu_imagenet (38 lines)

Example 13: _batch_norm_without_layers

def _batch_norm_without_layers(self, input_layer, decay, use_scale, epsilon):
  """Batch normalization on `input_layer` without tf.layers."""
  # We make this function as similar as possible to the
  # tf.contrib.layers.batch_norm, to minimize the differences between using
  # layers and not using layers.
  shape = input_layer.shape
  num_channels = shape[3] if self.data_format == 'NHWC' else shape[1]
  beta = self.get_variable('beta', [num_channels], tf.float32, tf.float32,
                           initializer=tf.zeros_initializer())
  if use_scale:
    gamma = self.get_variable('gamma', [num_channels], tf.float32,
                              tf.float32, initializer=tf.ones_initializer())
  else:
    gamma = tf.constant(1.0, tf.float32, [num_channels])
  # For moving variables, we use tf.get_variable instead of self.get_variable,
  # since self.get_variable returns the result of tf.cast which we cannot
  # assign to.
  moving_mean = tf.get_variable('moving_mean', [num_channels],
                                tf.float32,
                                initializer=tf.zeros_initializer(),
                                trainable=False)
  moving_variance = tf.get_variable('moving_variance', [num_channels],
                                    tf.float32,
                                    initializer=tf.ones_initializer(),
                                    trainable=False)
  if self.phase_train:
    bn, batch_mean, batch_variance = tf.nn.fused_batch_norm(
        input_layer, gamma, beta, epsilon=epsilon,
        data_format=self.data_format, is_training=True)
    mean_update = moving_averages.assign_moving_average(
        moving_mean, batch_mean, decay=decay, zero_debias=False)
    variance_update = moving_averages.assign_moving_average(
        moving_variance, batch_variance, decay=decay, zero_debias=False)
    tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, mean_update)
    tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, variance_update)
  else:
    bn, _, _ = tf.nn.fused_batch_norm(
        input_layer, gamma, beta, mean=moving_mean,
        variance=moving_variance, epsilon=epsilon,
        data_format=self.data_format, is_training=False)
  return bn

Source: tensorpack/benchmarks (43 lines)

Example 14: __call__

def __call__(self, input_layer, epsilon=1e-5, decay=0.9, name="batch_norm",
             in_dim=None, phase=Phase.train):
    shape = input_layer.shape
    shp = in_dim or shape[-1]
    with tf.variable_scope(name) as scope:
        self.mean = self.variable('mean', [shp], init=tf.constant_initializer(0.), train=False)
        self.variance = self.variable('variance', [shp], init=tf.constant_initializer(1.0), train=False)
        self.gamma = self.variable("gamma", [shp], init=tf.random_normal_initializer(1., 0.02))
        self.beta = self.variable("beta", [shp], init=tf.constant_initializer(0.))
        if phase == Phase.train:
            mean, variance = tf.nn.moments(input_layer.tensor, [0, 1, 2])
            mean.set_shape((shp,))
            variance.set_shape((shp,))
            update_moving_mean = moving_averages.assign_moving_average(self.mean, mean, decay)
            update_moving_variance = moving_averages.assign_moving_average(self.variance, variance, decay)
            with tf.control_dependencies([update_moving_mean, update_moving_variance]):
                normalized_x = tf.nn.batch_norm_with_global_normalization(
                    input_layer.tensor, mean, variance, self.beta, self.gamma, epsilon,
                    scale_after_normalization=True)
        else:
            normalized_x = tf.nn.batch_norm_with_global_normalization(
                input_layer.tensor, self.mean, self.variance,
                self.beta, self.gamma, epsilon,
                scale_after_normalization=True)
        return input_layer.with_tensor(normalized_x, parameters=self.vars)

Source: hanzhanggit/StackGAN (31 lines)

Example 15: _adaptive_max_norm

def _adaptive_max_norm(norm, std_factor, decay, global_step, epsilon, name):
  """Find max_norm given norm and previous average."""
  with vs.variable_scope(name, "AdaptiveMaxNorm", [norm]):
    log_norm = math_ops.log(norm + epsilon)

    def moving_average(name, value, decay):
      moving_average_variable = vs.get_variable(
          name,
          shape=value.get_shape(),
          dtype=value.dtype,
          initializer=init_ops.zeros_initializer(),
          trainable=False)
      return moving_averages.assign_moving_average(
          moving_average_variable, value, decay, zero_debias=False)

    # quicker adaptation at the beginning
    if global_step is not None:
      n = math_ops.cast(global_step, dtypes.float32)
      decay = math_ops.minimum(decay, n / (n + 1.))

    # update averages
    mean = moving_average("mean", log_norm, decay)
    sq_mean = moving_average("sq_mean", math_ops.square(log_norm), decay)

    variance = sq_mean - math_ops.square(mean)
    std = math_ops.sqrt(math_ops.maximum(epsilon, variance))
    max_norms = math_ops.exp(mean + std_factor * std)
    return max_norms, mean

Source: taehoonlee/tensornets (30 lines)

Example 16: moving_average_update

def moving_average_update(x, value, momentum):
    return moving_averages.assign_moving_average(
        x, value, momentum, zero_debias=False)


# LINEAR ALGEBRA

Source: ryfeus/lambda-packs (8 lines)

Example 17: _adaptive_max_norm

def _adaptive_max_norm(norm, std_factor, decay, global_step, epsilon, name):
  """Find max_norm given norm and previous average."""
  with vs.variable_scope(name, "AdaptiveMaxNorm", [norm]):
    log_norm = math_ops.log(norm + epsilon)

    def moving_average(name, value, decay):
      moving_average_variable = vs.get_variable(
          name,
          shape=value.get_shape(),
          dtype=value.dtype,
          initializer=init_ops.zeros_initializer(),
          trainable=False)
      return moving_averages.assign_moving_average(
          moving_average_variable, value, decay, zero_debias=False)

    # quicker adaptation at the beginning
    if global_step is not None:
      n = math_ops.to_float(global_step)
      decay = math_ops.minimum(decay, n / (n + 1.))

    # update averages
    mean = moving_average("mean", log_norm, decay)
    sq_mean = moving_average("sq_mean", math_ops.square(log_norm), decay)

    variance = sq_mean - math_ops.square(mean)
    std = math_ops.sqrt(math_ops.maximum(epsilon, variance))
    max_norms = math_ops.exp(mean + std_factor * std)
    return max_norms, mean

Source: ryfeus/lambda-packs (30 lines)

Example 18: moving_average_update

def moving_average_update(variable, value, momentum):
  try:
    return moving_averages.assign_moving_average(
        variable, value, momentum, zero_debias=False)
  except TypeError:
    return moving_averages.assign_moving_average(variable, value, momentum)

Source: simonfqy/PADME (8 lines)
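The try/except TypeError in Example 18 is, presumably, a compatibility shim for older TensorFlow releases in which assign_moving_average did not yet accept the zero_debias keyword; when the keyword is rejected, the call falls back to the original three-argument form.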

Example 19: batch_norm

def batch_norm(x, is_training, name="batch_norm", decay=0.999, epsilon=1.0):

shape = x.get_shape()[1]

with tf.variable_scope(name, reuse=None if is_training else True):

offset = tf.get_variable(

"offset", shape,

initializer=tf.constant_initializer(0.0, dtype=tf.float32))

scale = tf.get_variable(

"scale", shape,

initializer=tf.constant_initializer(1.0, dtype=tf.float32))

moving_mean = tf.get_variable(

"moving_mean", shape, trainable=False,

initializer=tf.constant_initializer(0.0, dtype=tf.float32))

moving_variance = tf.get_variable(

"moving_variance", shape, trainable=False,

initializer=tf.constant_initializer(1.0, dtype=tf.float32))

if is_training:

mean, variance = tf.nn.moments(x, [0])

update_mean = moving_averages.assign_moving_average(

moving_mean, mean, decay)

update_variance = moving_averages.assign_moving_average(

moving_variance, variance, decay)

with tf.control_dependencies([update_mean, update_variance]):

x = scale * (x - mean) / tf.sqrt(epsilon + variance) + offset

else:

x = scale * (x - moving_mean) / tf.sqrt(epsilon + moving_variance) + offset

return x

Source: muhanzhang/D-VAE (30 lines)

Example 20: batch_norm

def batch_norm(x, is_training, name="bn", decay=0.9, epsilon=1e-5,

data_format="NHWC"):

if data_format == "NHWC":

shape = [x.get_shape()[3]]

elif data_format == "NCHW":

shape = [x.get_shape()[1]]

else:

raise NotImplementedError("Unknown data_format {}".format(data_format))

with tf.variable_scope(name, reuse=None if is_training else True):

offset = tf.get_variable(

"offset", shape,

initializer=tf.constant_initializer(0.0, dtype=tf.float32))

scale = tf.get_variable(

"scale", shape,

initializer=tf.constant_initializer(1.0, dtype=tf.float32))

moving_mean = tf.get_variable(

"moving_mean", shape, trainable=False,

initializer=tf.constant_initializer(0.0, dtype=tf.float32))

moving_variance = tf.get_variable(

"moving_variance", shape, trainable=False,

initializer=tf.constant_initializer(1.0, dtype=tf.float32))

if is_training:

x, mean, variance = tf.nn.fused_batch_norm(

x, scale, offset, epsilon=epsilon, data_format=data_format,

is_training=True)

update_mean = moving_averages.assign_moving_average(

moving_mean, mean, decay)

update_variance = moving_averages.assign_moving_average(

moving_variance, variance, decay)

with tf.control_dependencies([update_mean, update_variance]):

x = tf.identity(x)

else:

x, _, _ = tf.nn.fused_batch_norm(x, scale, offset, mean=moving_mean,

variance=moving_variance,

epsilon=epsilon, data_format=data_format,

is_training=False)

return x

Source: muhanzhang/D-VAE (41 lines)

Note: The tensorflow.python.training.moving_averages.assign_moving_average examples in this article were collected from open-source projects hosted on GitHub, MSDocs, and similar platforms. The snippets were contributed by the original authors of those projects, and copyright remains with them; any use or redistribution should follow the license of the corresponding project. Do not republish without permission.
