导入(import)Tensorflow/PyTorch、NumPy和MatPlotlib库,编写并运行一个Python程序,分别使用CPU和GPU进行大量线性运算,分析运行速度。
时间: 2024-09-23 10:14:10 浏览: 25
在Python中,我们通常会通过`import`语句引入所需的库,如TensorFlow、PyTorch、NumPy和Matplotlib。以下是一个简单的例子,我们将展示如何导入这些库,并在CPU和GPU上执行线性运算来比较性能。
```python
"""Benchmark a large matrix multiplication on CPU (TensorFlow) and,
when CUDA is available, on GPU (PyTorch), then plot the timings."""

# Import required libraries
import time  # needed for the wall-clock timings below

import numpy as np
import tensorflow as tf
import torch
from matplotlib import pyplot as plt

# Pick the PyTorch device: prefer the GPU when CUDA support is present.
if torch.cuda.is_available():
    device = "cuda"  # use GPU
else:
    device = "cpu"  # fall back to CPU
print(f"Using {device} for computations")

# Generate a large random SQUARE matrix.  matmul requires 2-D operands;
# a 1-D vector of 100k elements cannot be matrix-multiplied directly.
n = 2000  # 2000x2000 = 4M elements — large enough to show a CPU/GPU gap
data = np.random.rand(n, n).astype(np.float32)

# --- TensorFlow, pinned to CPU ---
tf_data = tf.convert_to_tensor(data, dtype=tf.float32)
with tf.device("CPU:0"):
    cpu_start_time = time.time()
    tf_cpu_result = tf.matmul(tf_data, tf_data, transpose_b=True)  # A @ A.T
    cpu_end_time = time.time()
cpu_time = cpu_end_time - cpu_start_time
print(f"CPU Time: {cpu_time:.2f} seconds")

# --- PyTorch on the selected device ---
torch_device = torch.device(device)
torch_data = torch.tensor(data, device=torch_device)
with torch.no_grad():
    torch_start_time = time.time()
    torch_result = torch.matmul(torch_data, torch_data.t())  # A @ A.T
    if device == "cuda":
        # CUDA kernels launch asynchronously; wait for completion so the
        # measured time covers the computation, not just the launch.
        torch.cuda.synchronize()
    torch_end_time = time.time()
torch_time = torch_end_time - torch_start_time
print(f"PyTorch ({device}) Time: {torch_time:.2f} seconds")

# --- Optional dedicated GPU timing run ---
gpu_time = None  # stays None on CPU-only machines
if device == "cuda":
    with torch.no_grad():
        torch.cuda.synchronize()  # make sure prior work is done first
        torch_gpu_start_time = time.time()
        torch_gpu_result = torch.matmul(torch_data, torch_data.t())
        torch.cuda.synchronize()  # include kernel execution in the timing
        torch_gpu_end_time = time.time()
    gpu_time = torch_gpu_end_time - torch_gpu_start_time
    print(f"PyTorch (GPU) Time: {gpu_time:.2f} seconds")

# --- Visualize the results ---
# Only add a GPU bar when a GPU run actually happened, so we never
# reference an undefined timing on CPU-only machines.
labels = ["CPU"]
times = [cpu_time]
if gpu_time is not None:
    labels.append("GPU")
    times.append(gpu_time)
plt.figure(figsize=(10, 4))
plt.bar(labels, times)
plt.title('Linear Operation Speed Comparison')
plt.xlabel('Platform')
plt.ylabel('Time (seconds)')
plt.show()