Python keras.backend module: set_image_data_format() example source code
We extracted the following 10 code examples from open source Python projects to illustrate how to use keras.backend.set_image_data_format().
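As a quick orientation before the extracted examples (this sketch is not one of them), set_image_data_format() changes the global default axis order that layers such as Conv2D consult when no explicit data_format argument is passed:

from keras import backend as K
from keras.layers import Conv2D, Input
from keras.models import Model

# The default is usually 'channels_last' with the TensorFlow backend.
print(K.image_data_format())

# Switch the global default; layers created afterwards interpret image
# tensors as (batch, channels, rows, cols).
K.set_image_data_format('channels_first')
x = Input(shape=(3, 32, 32))             # channels axis comes first
y = Conv2D(8, (3, 3), padding='same')(x)
model = Model(x, y)
print(model.output_shape)                # (None, 8, 32, 32)

# Restore the previous setting so other code is not affected.
K.set_image_data_format('channels_last')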
# Assumed imports for the two DSSIM tests below (keras-contrib-style test suite):
import numpy as np
from numpy.testing import assert_allclose
from keras import backend as K
from keras.models import Sequential
from keras.layers import Conv2D
from keras.optimizers import Adam
from keras_contrib.losses import DSSIMObjective

def test_DSSIM_channels_last():
    prev_data = K.image_data_format()
    K.set_image_data_format('channels_last')
    for input_dim, kernel_size in zip([32, 33], [2, 3]):
        input_shape = [input_dim, input_dim, 3]
        X = np.random.random_sample(4 * input_dim * input_dim * 3).reshape([4] + input_shape)
        y = np.random.random_sample(4 * input_dim * input_dim * 3).reshape([4] + input_shape)
        model = Sequential()
        model.add(Conv2D(32, (3, 3), padding='same', input_shape=input_shape, activation='relu'))
        model.add(Conv2D(3, (3, 3), padding='same', activation='relu'))
        adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
        model.compile(loss=DSSIMObjective(kernel_size=kernel_size), metrics=['mse'], optimizer=adam)
        model.fit(X, y, batch_size=2, epochs=1, shuffle='batch')
        # Test same: identical inputs should give a DSSIM of 0
        x1 = K.constant(X, 'float32')
        x2 = K.constant(X, 'float32')
        dssim = DSSIMObjective(kernel_size=kernel_size)
        assert_allclose(0.0, K.eval(dssim(x1, x2)), atol=1e-4)
        # Test opposite: all-zeros vs. all-ones should give a DSSIM of 0.5
        x1 = K.zeros([4] + input_shape)
        x2 = K.ones([4] + input_shape)
        dssim = DSSIMObjective(kernel_size=kernel_size)
        assert_allclose(0.5, K.eval(dssim(x1, x2)), atol=1e-4)
    K.set_image_data_format(prev_data)
def test_DSSIM_channels_first():
    prev_data = K.image_data_format()
    K.set_image_data_format('channels_first')
    for input_dim, kernel_size in zip([32, 33], [2, 3]):
        input_shape = [3, input_dim, input_dim]
        X = np.random.random_sample(4 * input_dim * input_dim * 3).reshape([4] + input_shape)
        y = np.random.random_sample(4 * input_dim * input_dim * 3).reshape([4] + input_shape)
        model = Sequential()
        model.add(Conv2D(32, (3, 3), padding='same', input_shape=input_shape, activation='relu'))
        model.add(Conv2D(3, (3, 3), padding='same', activation='relu'))
        adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
        model.compile(loss=DSSIMObjective(kernel_size=kernel_size), metrics=['mse'], optimizer=adam)
        model.fit(X, y, batch_size=2, epochs=1, shuffle='batch')
        # Same and opposite DSSIM assertions mirror test_DSSIM_channels_last above.
        dssim = DSSIMObjective(kernel_size=kernel_size)
        assert_allclose(0.0, K.eval(dssim(K.constant(X, 'float32'), K.constant(X, 'float32'))), atol=1e-4)
        assert_allclose(0.5, K.eval(dssim(K.zeros([4] + input_shape), K.ones([4] + input_shape))), atol=1e-4)
    K.set_image_data_format(prev_data)
def test_smoke_channels_first():
    K.set_image_data_format('channels_first')
    _test_smoke('channels_first')

def test_smoke_channels_last():
    K.set_image_data_format('channels_last')
    _test_smoke('channels_last')

def test_equivalence_channels_first():
    K.set_image_data_format('channels_first')
    _test_equivalence('channels_first')

def test_equivalence_channels_last():
    K.set_image_data_format('channels_last')
    _test_equivalence('channels_last')
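The _test_smoke and _test_equivalence helpers are defined elsewhere in their project and are not shown in this listing. A common pattern for tests like these is to save and restore the global format, as the DSSIM tests above do; a minimal sketch of that pattern (the helper names here are hypothetical, not from the source project):

from contextlib import contextmanager
from keras import backend as K

@contextmanager
def image_data_format(fmt):
    """Temporarily switch the global image data format, then restore it."""
    prev = K.image_data_format()
    K.set_image_data_format(fmt)
    try:
        yield
    finally:
        K.set_image_data_format(prev)

# Hypothetical usage: run the same check under both layouts.
def run_under_both_formats(check):
    for fmt in ('channels_first', 'channels_last'):
        with image_data_format(fmt):
            check(fmt)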
# Assumed imports for MyCNN_Keras2:
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, Activation, BatchNormalization
from keras.layers.advanced_activations import ELU

def MyCNN_Keras2(X, nb_classes, nb_layers=4):
    from keras import backend as K
    K.set_image_data_format('channels_first')

    nb_filters = 32       # number of convolutional filters = "feature maps"
    kernel_size = (3, 3)  # convolution kernel size
    pool_size = (2, 2)    # size of pooling area for max pooling
    cl_dropout = 0.5      # conv. layer dropout
    dl_dropout = 0.8      # dense layer dropout

    channels = X.shape[1]  # channels = 1 for mono, 2 for stereo
    print(" MyCNN_Keras2: X.shape = ", X.shape, ", channels = ", channels)
    input_shape = (channels, X.shape[2], X.shape[3])

    model = Sequential()
    # Keras 1 spelled this argument border_mode; Keras 2 uses padding.
    model.add(Conv2D(nb_filters, kernel_size, padding='valid', input_shape=input_shape))
    model.add(BatchNormalization(axis=1))
    model.add(Activation('relu'))

    for layer in range(nb_layers - 1):  # add more layers than just the first
        model.add(Conv2D(nb_filters, kernel_size))
        model.add(BatchNormalization(axis=1))
        model.add(ELU(alpha=1.0))
        model.add(MaxPooling2D(pool_size=pool_size))
        model.add(Dropout(cl_dropout))

    model.add(Flatten())
    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dropout(dl_dropout))
    model.add(Dense(nb_classes))
    model.add(Activation("softmax"))
    model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
    return model
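A sketch of how MyCNN_Keras2 might be called; the shapes, class count, and random data below are illustrative assumptions, not taken from the source project:

import numpy as np

# Fake batch: 20 mono clips of 96 x 173 "pixels" (illustrative shapes only).
X = np.random.random((20, 1, 96, 173)).astype('float32')
nb_classes = 5
Y = np.eye(nb_classes)[np.random.randint(0, nb_classes, size=20)]  # one-hot labels

model = MyCNN_Keras2(X, nb_classes=nb_classes, nb_layers=4)
model.summary()
# Training works as usual, but the model is channels_first; on the
# TensorFlow backend that generally requires a GPU for Conv2D.
model.fit(X, Y, batch_size=10, epochs=1)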
def set_img_format():
    try:
        if K.backend() == 'theano':
            K.set_image_data_format('channels_first')
        else:
            K.set_image_data_format('channels_last')
    except AttributeError:
        # Older Keras versions (before image_data_format existed) use dim ordering.
        if K._BACKEND == 'theano':
            K.set_image_dim_ordering('th')
        else:
            K.set_image_dim_ordering('tf')
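A small usage sketch for set_img_format(), assuming the function above is in scope; it works on both newer and older Keras versions:

from keras import backend as K

set_img_format()
# Query the current setting with whichever API the installed Keras provides.
fmt = K.image_data_format() if hasattr(K, 'image_data_format') else K.image_dim_ordering()
print('backend:', K.backend(), '-> data format:', fmt)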
import numpy as np
from keras import backend as K
from keras.layers import Input
# VGG16 / VGG19 below are the encoder wrappers defined by the project these tests
# were extracted from (they take an input tensor plus weights/trainable flags),
# not keras.applications.

def test_vgg16():
    for data_format in ['channels_first', 'channels_last']:
        K.set_image_data_format(data_format)
        if K.image_data_format() == 'channels_first':
            x = Input(shape=(3, 500, 500))
            pool1_shape = (None, 64, 250, 250)
            pool2_shape = (None, 128, 125, 125)
            pool3_shape = (None, 256, 63, 63)
            pool4_shape = (None, 512, 32, 32)
            drop7_shape = (None, 4096, 16, 16)
            conv1_weight = -0.35009676
        else:
            x = Input(shape=(500, 500, 3))
            pool1_shape = (None, 250, 250, 64)
            pool2_shape = (None, 125, 125, 128)
            pool3_shape = (None, 63, 63, 256)
            pool4_shape = (None, 32, 32, 512)
            drop7_shape = (None, 16, 16, 4096)
            conv1_weight = 0.429471
        encoder = VGG16(x, weights='imagenet', trainable=False)
        feat_pyramid = encoder.outputs
        assert len(feat_pyramid) == 5
        assert K.int_shape(feat_pyramid[0]) == drop7_shape
        assert K.int_shape(feat_pyramid[1]) == pool4_shape
        assert K.int_shape(feat_pyramid[2]) == pool3_shape
        assert K.int_shape(feat_pyramid[3]) == pool2_shape
        assert K.int_shape(feat_pyramid[4]) == pool1_shape
        for layer in encoder.layers:
            if layer.name == 'block1_conv1':
                assert layer.trainable is False
                weights = K.eval(layer.weights[0])
                assert np.allclose(weights[0, 0, 0], conv1_weight)
        encoder_from_scratch = VGG16(x, weights=None, trainable=True)
        for layer in encoder_from_scratch.layers:
            if layer.name == 'block1_conv1':
                assert layer.trainable is True
                weights = K.eval(layer.weights[0])
                assert not np.allclose(weights[0, 0, 0], conv1_weight)
def test_vgg19():
    # Same structure as test_vgg16 above, but with the VGG19 encoder: the
    # data-format loop, expected feature-pyramid shapes, and block1_conv1
    # weight checks mirror the VGG16 test (with conv1_weight = 0.429471).
    for data_format in ['channels_first', 'channels_last']:
        K.set_image_data_format(data_format)
        ...
        encoder = VGG19(x, weights='imagenet', trainable=False)
        ...
        encoder_from_scratch = VGG19(x, weights=None, trainable=True)
        ...
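The VGG tests above choose their expected shapes from the active data format. Downstream code typically builds its inputs the same way; a minimal sketch (the helper name is hypothetical):

from keras import backend as K
from keras.layers import Input

def make_image_input(rows=500, cols=500, channels=3):
    """Build an Input whose shape matches the current global data format."""
    if K.image_data_format() == 'channels_first':
        return Input(shape=(channels, rows, cols))
    return Input(shape=(rows, cols, channels))

x = make_image_input()  # e.g. (3, 500, 500) or (500, 500, 3)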