问题:如何将以下 JPEG 压缩率计算的结果输出?代码为:`original_size = os.path.getsize('your_image_path.png')`(压缩前图像大小)、`compressed_size = os.path.getsize('compressed_image.jpg')`(压缩后图像大小)、`compression_ratio = compressed_size / original_size`(压缩率)。
时间: 2024-02-18 21:01:26 浏览: 110
当您完成了JPEG图像压缩和计算压缩率后,可以使用以下代码将结果输出:
```python
# Print the compression statistics computed earlier.
# NOTE(review): assumes original_size, compressed_size and
# compression_ratio are already defined by the preceding snippet.
print('原图像大小:', original_size)  # original file size, in bytes
print('压缩后图像大小:', compressed_size)  # compressed file size, in bytes
print('压缩率:', compression_ratio)  # compressed / original size ratio
```
这将打印出原始图像的大小,压缩后图像的大小以及压缩率。请确保将`original_size`和`compressed_size`设置为字节为单位的整数值。
相关问题
逐行详细解释以下代码并加注释from tensorflow import keras import matplotlib.pyplot as plt base_image_path = keras.utils.get_file( "coast.jpg", origin="https://img-datasets.s3.amazonaws.com/coast.jpg") plt.axis("off") plt.imshow(keras.utils.load_img(base_image_path)) #instantiating a model from tensorflow.keras.applications import inception_v3 model = inception_v3.InceptionV3(weights='imagenet',include_top=False) #配置各层对DeepDream损失的贡献 layer_settings = { "mixed4": 1.0, "mixed5": 1.5, "mixed6": 2.0, "mixed7": 2.5, } outputs_dict = dict( [ (layer.name, layer.output) for layer in [model.get_layer(name) for name in layer_settings.keys()] ] ) feature_extractor = keras.Model(inputs=model.inputs, outputs=outputs_dict) #定义损失函数 import tensorflow as tf def compute_loss(input_image): features = feature_extractor(input_image) loss = tf.zeros(shape=()) for name in features.keys(): coeff = layer_settings[name] activation = features[name] loss += coeff * tf.reduce_mean(tf.square(activation[:, 2:-2, 2:-2, :])) return loss #梯度上升过程 @tf.function def gradient_ascent_step(image, learning_rate): with tf.GradientTape() as tape: tape.watch(image) loss = compute_loss(image) grads = tape.gradient(loss, image) grads = tf.math.l2_normalize(grads) image += learning_rate * grads return loss, image def gradient_ascent_loop(image, iterations, learning_rate, max_loss=None): for i in range(iterations): loss, image = gradient_ascent_step(image, learning_rate) if max_loss is not None and loss > max_loss: break print(f"... Loss value at step {i}: {loss:.2f}") return image #hyperparameters step = 20. num_octave = 3 octave_scale = 1.4 iterations = 30 max_loss = 15. #图像处理方面 import numpy as np def preprocess_image(image_path): img = keras.utils.load_img(image_path) img = keras.utils.img_to_array(img) img = np.expand_dims(img, axis=0) img = keras.applications.inception_v3.preprocess_input(img) return img def deprocess_image(img): img = img.reshape((img.shape[1], img.shape[2], 3)) img /= 2.0 img += 0.5 img *= 255. 
img = np.clip(img, 0, 255).astype("uint8") return img #在多个连续 上运行梯度上升 original_img = preprocess_image(base_image_path) original_shape = original_img.shape[1:3] successive_shapes = [original_shape] for i in range(1, num_octave): shape = tuple([int(dim / (octave_scale ** i)) for dim in original_shape]) successive_shapes.append(shape) successive_shapes = successive_shapes[::-1] shrunk_original_img = tf.image.resize(original_img, successive_shapes[0]) img = tf.identity(original_img) for i, shape in enumerate(successive_shapes): print(f"Processing octave {i} with shape {shape}") img = tf.image.resize(img, shape) img = gradient_ascent_loop( img, iterations=iterations, learning_rate=step, max_loss=max_loss ) upscaled_shrunk_original_img = tf.image.resize(shrunk_original_img, shape) same_size_original = tf.image.resize(original_img, shape) lost_detail = same_size_original - upscaled_shrunk_original_img img += lost_detail shrunk_original_img = tf.image.resize(original_img, shape) keras.utils.save_img("DeepDream.png", deprocess_image(img.numpy()))
这段代码是一个使用 DeepDream 技术对一张图像进行处理的例子。下面对每一部分进行详细解释:
1. 导入所需的库
```python
from tensorflow import keras
import matplotlib.pyplot as plt
```
导入了 TensorFlow 的 Keras 高级接口,以及用于绘制图像的 Matplotlib 库。
2. 加载图像
```python
# Download the sample image (cached under ~/.keras/datasets) and preview it.
base_image_path = keras.utils.get_file(
    "coast.jpg", origin="https://img-datasets.s3.amazonaws.com/coast.jpg")
plt.axis("off")  # hide axes for a cleaner preview
plt.imshow(keras.utils.load_img(base_image_path))
```
使用 `keras.utils.get_file` 函数从亚马逊 S3 存储桶中下载名为 "coast.jpg" 的图像,并使用 `keras.utils.load_img` 函数加载该图像。`plt.axis("off")` 和 `plt.imshow` 函数用于绘制该图像并关闭坐标轴。
3. 实例化模型
```python
from tensorflow.keras.applications import inception_v3
# Pretrained InceptionV3 used purely as a feature extractor; the
# classification head (include_top=False) is not needed for DeepDream.
model = inception_v3.InceptionV3(weights='imagenet',include_top=False)
```
使用 Keras 库中的 InceptionV3 模型对图像进行处理。`weights='imagenet'` 表示使用预训练的权重,`include_top=False` 表示去掉模型的顶层(全连接层)。
4. 配置 DeepDream 损失
```python
# Weight of each InceptionV3 "mixed" layer in the DeepDream loss;
# larger coefficients make that layer's patterns more prominent.
layer_settings = {
    "mixed4": 1.0,
    "mixed5": 1.5,
    "mixed6": 2.0,
    "mixed7": 2.5,
}
# Map layer name -> symbolic output tensor for each configured layer.
outputs_dict = dict(
    [(layer.name, layer.output) for layer in [model.get_layer(name) for name in layer_settings.keys()]]
)
# Model returning all selected activations in a single forward pass.
feature_extractor = keras.Model(inputs=model.inputs, outputs=outputs_dict)
```
通过配置不同层对 DeepDream 损失的贡献来控制图像的风格。该代码块中的 `layer_settings` 字典定义了每层对损失的贡献,`outputs_dict` 变量将每层的输出保存到一个字典中,`feature_extractor` 变量实例化一个新模型来提取特征。
5. 定义损失函数
```python
import tensorflow as tf
def compute_loss(input_image):
    """Return the DeepDream loss for `input_image`.

    The loss is the weighted sum (per `layer_settings`) of the mean
    squared activation of each selected layer.
    """
    features = feature_extractor(input_image)
    loss = tf.zeros(shape=())
    for name in features.keys():
        coeff = layer_settings[name]
        activation = features[name]
        # Trim a 2-pixel border so edge artifacts don't dominate the loss.
        loss += coeff * tf.reduce_mean(tf.square(activation[:, 2:-2, 2:-2, :]))
    return loss
```
定义了一个计算 DeepDream 损失的函数。该函数首先使用 `feature_extractor` 模型提取输入图像的特征,然后计算每层对损失的贡献并相加,最终返回总损失。
6. 梯度上升过程
```python
@tf.function  # compile to a TF graph for speed
def gradient_ascent_step(image, learning_rate):
    """Run one gradient-ascent step on `image`.

    Returns the loss before the update and the updated image.
    """
    with tf.GradientTape() as tape:
        # `image` is a plain tensor, not a Variable, so it must be
        # watched explicitly for gradients to be recorded.
        tape.watch(image)
        loss = compute_loss(image)
    grads = tape.gradient(loss, image)
    # Normalize so the step magnitude is set by learning_rate alone.
    grads = tf.math.l2_normalize(grads)
    image += learning_rate * grads
    return loss, image
def gradient_ascent_loop(image, iterations, learning_rate, max_loss=None):
    """Repeat gradient_ascent_step up to `iterations` times.

    Stops early once the loss exceeds `max_loss` (if given); note that
    the loss of the step that triggers the early stop is not printed.
    """
    for i in range(iterations):
        loss, image = gradient_ascent_step(image, learning_rate)
        if max_loss is not None and loss > max_loss:
            break
        print(f"... Loss value at step {i}: {loss:.2f}")
    return image
```
定义了一个用于实现梯度上升过程的函数。`gradient_ascent_step` 函数计算输入图像的损失和梯度,然后对图像进行梯度上升并返回更新后的图像和损失。`gradient_ascent_loop` 函数使用 `gradient_ascent_step` 函数实现多次迭代,每次迭代都会计算损失和梯度,并对输入图像进行更新。
7. 设置超参数
```python
# DeepDream hyperparameters.
step = 20.          # gradient-ascent learning rate
num_octave = 3      # number of scales ("octaves") to process
octave_scale = 1.4  # size ratio between successive octaves
iterations = 30     # gradient-ascent steps per octave
max_loss = 15.      # stop an octave early once loss exceeds this
```
设置了一些 DeepDream 算法的超参数,例如梯度上升步长、金字塔层数、金字塔缩放比例、迭代次数和损失上限。
8. 图像处理
```python
import numpy as np
def preprocess_image(image_path):
    """Load an image file as a (1, H, W, 3) float batch scaled to
    the [-1, 1] range expected by InceptionV3."""
    img = keras.utils.load_img(image_path)
    img = keras.utils.img_to_array(img)
    img = np.expand_dims(img, axis=0)  # add the batch dimension
    img = keras.applications.inception_v3.preprocess_input(img)
    return img
def deprocess_image(img):
    """Convert an InceptionV3-preprocessed batch back to a displayable image.

    Inverts ``inception_v3.preprocess_input`` (which maps pixels to
    [-1, 1]) and drops the leading batch axis.

    Parameters
    ----------
    img : np.ndarray
        Array of shape (1, H, W, 3), values roughly in [-1, 1].

    Returns
    -------
    np.ndarray
        uint8 array of shape (H, W, 3) with values in [0, 255].
    """
    # Copy first: the original in-place ops (img /= 2.0 ...) mutated the
    # caller's array through the reshape view.
    img = np.array(img, dtype="float32").reshape((img.shape[1], img.shape[2], 3))
    img = (img / 2.0 + 0.5) * 255.0  # [-1, 1] -> [0, 255]
    return np.clip(img, 0, 255).astype("uint8")
```
定义了两个函数,`preprocess_image` 函数将输入图像进行预处理,`deprocess_image` 函数将处理后的图像进行还原。
9. DeepDream 算法过程
```python
# --- Multi-scale (octave) gradient ascent ---------------------------
original_img = preprocess_image(base_image_path)
original_shape = original_img.shape[1:3]  # (H, W) of the preprocessed image
# Build the list of octave shapes, largest first, then reverse it so
# processing runs from the smallest scale up to the original size.
successive_shapes = [original_shape]
for i in range(1, num_octave):
    shape = tuple([int(dim / (octave_scale ** i)) for dim in original_shape])
    successive_shapes.append(shape)
successive_shapes = successive_shapes[::-1]  # small -> large
shrunk_original_img = tf.image.resize(original_img, successive_shapes[0])
img = tf.identity(original_img)  # work on a copy of the original
for i, shape in enumerate(successive_shapes):
    print(f"Processing octave {i} with shape {shape}")
    img = tf.image.resize(img, shape)
    img = gradient_ascent_loop(
        img, iterations=iterations, learning_rate=step, max_loss=max_loss
    )
    # Re-inject the detail lost by downscaling: the difference between
    # the original at this scale and the upscaled smaller version.
    upscaled_shrunk_original_img = tf.image.resize(shrunk_original_img, shape)
    same_size_original = tf.image.resize(original_img, shape)
    lost_detail = same_size_original - upscaled_shrunk_original_img
    img += lost_detail
    shrunk_original_img = tf.image.resize(original_img, shape)
keras.utils.save_img("DeepDream.png", deprocess_image(img.numpy()))
```
使用预先定义的函数和变量实现了 DeepDream 算法的过程。首先对原始图像进行预处理,然后根据金字塔层数和缩放比例生成一系列逐渐增大的尺寸,按从小到大的顺序依次将图像缩放到每个尺寸并进行梯度上升;每个尺度处理完后,把缩放过程中丢失的细节重新注入图像,最终使用 `keras.utils.save_img` 函数保存结果。
import os import cv2 # 定义原始和更改大小后的文件夹路径 original_folder_path = 'E:\shujuji\Intel Image Classification\seg_train\seg_train/buildings' resized_folder_path = 'E:\shujuji\Intel Image Classification\seg_train\change/buildings' # 如果更改大小后的文件夹不存在,则创建它 if not os.path.exists(resized_folder_path): os.makedirs(resized_folder_path) # 循环遍历原始文件夹中的所有图像文件 for filename in os.listdir(original_folder_path): if filename.endswith('.jpg') or filename.endswith('.png') or filename.endswith('.jpeg'): # 读取原始图像 img = cv2.imread(os.path.join(original_folder_path, filename)) # 将图像大小更改为(224,224,3) img_resized = cv2.resize(img, (224, 224)) # 保存更改大小后的图像到新文件夹中,使用原始文件名 cv2.imwrite(os.path.join(resized_folder_path, filename), img_resized),对这段代码进行更改,使之满足要求
以下是修改后的代码,满足获取文件夹下所有文件夹的所有图片,更改图片的尺寸后生成文件夹,按照原始文件夹进行排布,文件名使用原始文件名的要求:
```python
import os
import cv2
# 定义函数,用于调整图片尺寸
def resize_image(image_path, output_path, new_size):
    """Read the image at `image_path`, resize it to `new_size` (w, h)
    and write the result to `output_path`.

    Raises
    ------
    ValueError
        If the file cannot be decoded as an image.
    """
    img = cv2.imread(image_path)
    if img is None:
        # cv2.imread fails silently (returns None) on unreadable or
        # corrupt files; raise a clear error instead of letting
        # cv2.resize crash with an opaque message.
        raise ValueError(f"Could not read image: {image_path}")
    img_resized = cv2.resize(img, new_size)
    cv2.imwrite(output_path, img_resized)
# 定义函数,用于获取指定文件夹下所有图片
def get_all_images(folder_path):
    """Recursively collect paths of all image files under `folder_path`.

    Matches .jpg, .jpeg and .png extensions case-insensitively (so
    .JPG / .PNG files are not skipped). Returns a list of full paths;
    an empty list if the folder does not exist.
    """
    image_exts = ('.jpg', '.jpeg', '.png')
    images = []
    for root, _dirs, files in os.walk(folder_path):
        for file in files:
            # endswith accepts a tuple — one call instead of an or-chain.
            if file.lower().endswith(image_exts):
                images.append(os.path.join(root, file))
    return images
# 定义函数,用于生成新文件夹并存储调整后的图片
def generate_resized_images(source_folder, target_folder, new_size):
    """Mirror `source_folder`'s directory tree under `target_folder`,
    writing a resized copy of every image found, keeping each image's
    original file name and relative location."""
    for src in get_all_images(source_folder):
        # Rebuild the same relative layout under the target root.
        rel = os.path.relpath(src, source_folder)
        dest_dir = os.path.join(target_folder, os.path.dirname(rel))
        os.makedirs(dest_dir, exist_ok=True)
        dest = os.path.join(dest_dir, os.path.basename(src))
        resize_image(src, dest, new_size)
# --- Configuration --------------------------------------------------
# Source tree holding the original images, and the mirror tree that
# will receive the resized copies (raw strings keep the backslashes
# literal on Windows).
original_folder_path = r'E:\shujuji\Intel Image Classification\seg_train\seg_train\buildings'
resized_folder_path = r'E:\shujuji\Intel Image Classification\seg_train\change\buildings'
# Create the output root if it does not exist yet.
os.makedirs(resized_folder_path, exist_ok=True)
# Target size is 224x224; walk the source tree and write resized copies.
new_size = (224, 224)
generate_resized_images(original_folder_path, resized_folder_path, new_size)
```
在使用时,需要将代码中的 `original_folder_path` 替换为要处理的原始文件夹的路径,`resized_folder_path` 替换为生成的新文件夹的路径,`new_size` 替换为要调整的图片尺寸。代码会自动获取原始文件夹下所有图片,并按照原始文件夹进行排布,生成新文件夹并存储调整后的图片。新文件夹的文件名使用原始文件名。
阅读全文