import math
from keras import backend as K
from keras.optimizers import Adam

# Define our custom metric: Peak Signal-to-Noise Ratio (in dB)
def PSNR(y_true, y_pred):
    max_pixel = 1.0
    # math.log10 cannot operate on tensors, so compute log10 via K.log
    mse = K.mean(K.square(y_pred - y_true))
    return 10.0 * K.log((max_pixel ** 2) / mse) / math.log(10.0)

# Define our custom loss function: the Charbonnier penalty,
# a smooth, differentiable approximation of the L1 loss
def charbonnier(y_true, y_pred):
    epsilon = 1e-3
    error = y_true - y_pred
    p = K.sqrt(K.square(error) + K.square(epsilon))
    return K.mean(p)

# Compile our model with the custom loss and metric
adam = Adam(lr=0.0001)
model.compile(loss=[charbonnier], metrics=[PSNR], optimizer=adam)
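If you want to sanity-check the two functions before training, you can evaluate them on random tensors. Below is a minimal sketch; the shapes and random values are made up purely for illustration.

import numpy as np

# Evaluate the custom loss and metric on dummy data (illustrative values only)
y_true = K.constant(np.random.rand(4, 32, 32, 3))
y_pred = K.constant(np.random.rand(4, 32, 32, 3))
print(K.eval(charbonnier(y_true, y_pred)))  # scalar Charbonnier loss
print(K.eval(PSNR(y_true, y_pred)))         # PSNR in dB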
Just like with metrics and loss functions, you may find yourself needing to create a custom layer if you want to use something beyond the standard convolution, pooling, and activation layers. In that case, you can follow the code example I give below to implement one!
From the Keras documentation, the main methods we need to implement ourselves are build(), call(), and compute_output_shape():
import tensorflow as tf
from keras import layers

def tf_int_round(num):
    return tf.cast(tf.round(num), dtype=tf.int32)

class resize_layer(layers.Layer):
    # Initialize variables
    def __init__(self, scale, **kwargs):
        self.scale = scale
        super(resize_layer, self).__init__(**kwargs)

    def build(self, input_shape):
        super(resize_layer, self).build(input_shape)

    # Defining what the layer does when called on an input tensor
    def call(self, x, method="bicubic"):
        height = tf_int_round(tf.cast(tf.shape(x)[1], dtype=tf.float32) * self.scale)
        width = tf_int_round(tf.cast(tf.shape(x)[2], dtype=tf.float32) * self.scale)
        if method == "bilinear":
            return tf.image.resize_bilinear(x, size=(height, width))
        elif method == "bicubic":
            return tf.image.resize_bicubic(x, size=(height, width))
        elif method == "nearest":
            return tf.image.resize_nearest_neighbor(x, size=(height, width))

    # Defining the computation of the output shape
    def compute_output_shape(self, input_shape):
        height = int(input_shape[1] * self.scale) if input_shape[1] is not None else None
        width = int(input_shape[2] * self.scale) if input_shape[2] is not None else None
        return (input_shape[0], height, width, input_shape[3])

# Using our new custom layer with the Functional API
image_2 = resize_layer(scale=2)(image, method="bilinear")
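To see the layer working end to end, you can drop it into a tiny functional-API model. The sketch below is only an illustration: the input shape, filter count, and model name are arbitrary assumptions, not part of the original example.

from keras.layers import Input, Conv2D
from keras.models import Model

# Hypothetical toy model: upsample by 2x, then apply a single convolution
inputs = Input(shape=(None, None, 3))
upsampled = resize_layer(scale=2)(inputs, method="bilinear")
outputs = Conv2D(3, (3, 3), padding='same')(upsampled)
toy_model = Model(inputs, outputs)
toy_model.summary()  # spatial dims stay None; they are doubled at run time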
Keras ships with several models pre-trained on ImageNet that you can use right away. However, if you want to use these models directly, you have to resize your images beforehand, because the final fully connected layers force a fixed input size. For example, the Xception model was trained on 299x299 images, so every image must be resized to that size to avoid errors. On top of that, a model may have other pre-processing or post-processing steps that you would like applied automatically whenever you pass it an image.
We can use Keras's Lambda layer to build any math or pre-processing operation right into the model! A Lambda simply defines the operation you want to apply, and wrapping it as a layer lets you bake that functionality completely into the model. Check out the code below to see how we embed the resizing, as well as Xception's pre-processing, inside the model!
from keras.applications.xception import Xception, preprocess_input
from keras.models import Model
from keras.layers import Input
from keras.layers.core import Lambda
from keras.backend import tf as ktf
# Initialize a Xception model
Xception_model = Xception(include_top=True, weights='imagenet', input_tensor=None, input_shape=None)
# Any required pre-processing should be baked into the model
input_tensor = Input(shape=(None, None, 3))
x = Lambda(lambda image: ktf.image.resize_images(image, (299, 299)))(input_tensor)
x = Lambda(lambda image: preprocess_input(image))(x)
output_tensor = Xception_model(x)
final_Xception_model = Model(input_tensor, output_tensor)
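Since the resizing and Xception pre-processing are now baked in, the wrapped model happily accepts images of any size. Here is a minimal sketch; the random 480x640 array simply stands in for a real image.

import numpy as np

# A stand-in 480x640 RGB image; resizing and preprocess_input happen inside the model
dummy_image = np.random.randint(0, 256, size=(1, 480, 640, 3)).astype('float32')
predictions = final_Xception_model.predict(dummy_image)
print(predictions.shape)  # (1, 1000) ImageNet class probabilities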
If we want to write a large model, say 50 or even 100 layers deep, the code quickly becomes very messy. When you have to define that many layers, plus all the residual or dense connections between them, you end up with extremely cluttered code!
Instead, we can use a small trick with the functional API and define repeated code blocks as functions. For example, a ResNet has many repeated residual blocks built from the same basic components (batch normalization, an activation function, and a convolution). We can therefore define those operations as a single block inside a function, greatly simplifying the code. Check out the code below, which implements ResNet and DenseNet blocks and shows you how to use them.
from keras.layers import BatchNormalization, Activation, Conv2D, add, concatenate

# Pre-activation convolution: BatchNorm -> ReLU -> Conv
def preact_conv(inputs, k=3, filters=64):
    outputs = BatchNormalization()(inputs)
    outputs = Activation('relu')(outputs)
    outputs = Conv2D(filters, kernel_size=(k, k), padding='same',
                     kernel_initializer="glorot_normal")(outputs)
    return outputs

# ResNet-style residual block: two pre-activation convolutions plus a skip connection
def ResidualBlock(inputs, kernel_size=3, filters=64):
    outputs = preact_conv(inputs, k=kernel_size, filters=filters)
    outputs = preact_conv(outputs, k=kernel_size, filters=filters)
    outputs = add([outputs, inputs])
    return outputs

# DenseNet-style dense block: each layer sees all previously stacked features
def DenseBlock(stack, n_layers, growth_rate):
    new_features = []
    for i in range(n_layers):
        layer = preact_conv(stack, filters=growth_rate)
        new_features.append(layer)
        # stack the new layer's features onto the running input
        stack = concatenate([stack, layer], axis=-1)
    new_features = concatenate(new_features, axis=-1)
    return new_features

# Applying a stack of 5 Residual Blocks for a ResNet, just 5 lines of code
# If we wrote this out layer by layer, this would probably take 4-5x the number of lines
x = ResidualBlock(x)
x = ResidualBlock(x)
x = ResidualBlock(x)
x = ResidualBlock(x)
x = ResidualBlock(x)
# Applying a stack of 5 Dense Blocks for a DenseNet, just 5 lines of code
# DenseNets are even more complex to implement than ResNets, so if we wrote
# this out layer by layer, this would probably take 5-10x the number of lines
x = DenseBlock(x, n_layers=4, growth_rate=12)
x = DenseBlock(x, n_layers=6, growth_rate=12)
x = DenseBlock(x, n_layers=8, growth_rate=12)
x = DenseBlock(x, n_layers=10, growth_rate=12)
x = DenseBlock(x, n_layers=12, growth_rate=12)
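To turn a stack of these blocks into a complete network, you just wire them together with the functional API. The sketch below is only an illustration: the input shape, stem convolution, and 10-class head are arbitrary assumptions, not part of the original example.

from keras.layers import Input, Conv2D, GlobalAveragePooling2D, Dense
from keras.models import Model

inputs = Input(shape=(32, 32, 3))
features = Conv2D(64, (3, 3), padding='same')(inputs)  # stem so channels match the blocks
for _ in range(5):
    features = ResidualBlock(features)                  # the same 5-block ResNet stack as above
features = GlobalAveragePooling2D()(features)
outputs = Dense(10, activation='softmax')(features)    # hypothetical 10-class head
toy_resnet = Model(inputs, outputs)
toy_resnet.summary()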