PINN Code for Solving the 1D Sod Problem
Sure. Below is an example of using a PINN to solve the one-dimensional Sod shock-tube problem; the comments explain each step.
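For context, the physics loss in the code enforces the one-dimensional Euler equations in conservative form, stated here for reference:

```latex
% 1D Euler equations in conservative form, U_t + F(U)_x = 0
\partial_t \begin{pmatrix} \rho \\ \rho u \\ E \end{pmatrix}
+ \partial_x \begin{pmatrix} \rho u \\ \rho u^2 + p \\ u \, (E + p) \end{pmatrix} = 0,
\qquad E = \frac{p}{\gamma - 1} + \frac{1}{2} \rho u^2 .
```

The network predicts the primitive variables (rho, u, p) as a function of (x, t), and automatic differentiation supplies the derivatives in the residual.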
```python
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Problem parameters
gamma = 1.4
x_left = 0.0
x_right = 1.0
t_final = 0.2
num_points = 100  # grid resolution per axis (the full x-t grid has num_points**2 collocation points)

# Network hyperparameters
num_hidden_layers = 4
num_neurons_per_layer = 20
learning_rate = 0.001
# Define the model: a fully connected network mapping (x, t) -> (rho, u, p)
class SodPINN(tf.keras.Model):
    def __init__(self, num_hidden_layers, num_neurons_per_layer):
        super(SodPINN, self).__init__()
        # Hidden layers with tanh activations (smooth, so the derivatives
        # taken by the physics loss are well defined)
        self.hidden_layers = [
            tf.keras.layers.Dense(num_neurons_per_layer, activation=tf.nn.tanh)
            for _ in range(num_hidden_layers)
        ]
        # Output layer: one unit each for rho, u, p
        self.output_layer = tf.keras.layers.Dense(3)

    def call(self, inputs):
        x = inputs
        for hidden_layer in self.hidden_layers:
            x = hidden_layer(x)
        return self.output_layer(x)
# Initial condition: the classical Sod states, with the discontinuity at x = 0.5
def initial_value(x):
    # x has shape (N, 1)
    rho = tf.where(x < 0.5, 1.0, 0.125)
    u = tf.zeros_like(x)
    p = tf.where(x < 0.5, 1.0, 0.1)
    return tf.concat([rho, u, p], axis=1)

# Boundary conditions: on this domain the waves do not reach the ends
# before t_final, so both boundaries can be held at their initial states
def left_boundary(xt):
    # Left state: (rho, u, p) = (1.0, 0.0, 1.0)
    rho = tf.ones_like(xt[:, 0:1])
    u = tf.zeros_like(xt[:, 0:1])
    p = tf.ones_like(xt[:, 0:1])
    return tf.concat([rho, u, p], axis=1)

def right_boundary(xt):
    # Right state: (rho, u, p) = (0.125, 0.0, 0.1)
    rho = 0.125 * tf.ones_like(xt[:, 0:1])
    u = tf.zeros_like(xt[:, 0:1])
    p = 0.1 * tf.ones_like(xt[:, 0:1])
    return tf.concat([rho, u, p], axis=1)
# Loss: PDE residual of the 1D Euler equations on the collocation points,
# plus a supervised loss on the initial- and boundary-condition points
def get_loss(model, colloc, data_inputs, data_targets):
    x = tf.convert_to_tensor(colloc[:, 0:1], dtype=tf.float32)
    t = tf.convert_to_tensor(colloc[:, 1:2], dtype=tf.float32)
    with tf.GradientTape(persistent=True) as tape:
        tape.watch([x, t])
        out = model(tf.concat([x, t], axis=1))
        rho = out[:, 0:1]
        u = out[:, 1:2]
        p = out[:, 2:3]
        # Conserved quantities and fluxes
        E = p / (gamma - 1.0) + 0.5 * rho * u ** 2  # total energy
        m = rho * u                                 # momentum
        f2 = rho * u ** 2 + p                       # momentum flux
        f3 = u * (E + p)                            # energy flux
    # Residuals of U_t + F(U)_x = 0, derivatives via automatic differentiation
    r_mass = tape.gradient(rho, t) + tape.gradient(m, x)
    r_momentum = tape.gradient(m, t) + tape.gradient(f2, x)
    r_energy = tape.gradient(E, t) + tape.gradient(f3, x)
    del tape
    pde_loss = (tf.reduce_mean(r_mass ** 2)
                + tf.reduce_mean(r_momentum ** 2)
                + tf.reduce_mean(r_energy ** 2))
    # Supervised loss: match the known states at t = 0 and at both boundaries
    data_loss = tf.reduce_mean((model(data_inputs) - data_targets) ** 2)
    return pde_loss + data_loss
# Training loop: minibatch Adam over the collocation points; the full set of
# IC/BC data points is reused for every batch
def train(model, colloc, data_inputs, data_targets, learning_rate, num_epochs, batch_size):
    optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
    num_batches = colloc.shape[0] // batch_size
    loss_history = []
    for epoch in range(num_epochs):
        colloc = colloc[np.random.permutation(colloc.shape[0])]  # reshuffle each epoch
        epoch_loss = 0.0
        for i in range(num_batches):
            batch = colloc[i * batch_size:(i + 1) * batch_size, :]
            with tf.GradientTape() as tape:
                loss = get_loss(model, batch, data_inputs, data_targets)
            grads = tape.gradient(loss, model.trainable_variables)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))
            epoch_loss += float(loss) / num_batches
        if epoch % 10 == 0:
            print('Epoch {}: Loss = {:.6f}'.format(epoch, epoch_loss))
        loss_history.append(epoch_loss)
    return loss_history
# Collocation points: a uniform grid over the (x, t) domain
x = np.linspace(x_left, x_right, num_points, dtype=np.float32)
t = np.linspace(0.0, t_final, num_points, dtype=np.float32)
x_mesh, t_mesh = np.meshgrid(x, t)
inputs = np.concatenate([x_mesh.reshape(-1, 1), t_mesh.reshape(-1, 1)], axis=1)

# Supervised points: the initial condition at t = 0 plus both boundaries
ic_inputs = np.stack([x, np.zeros_like(x)], axis=1)
lb_inputs = np.stack([np.full_like(t, x_left), t], axis=1)
rb_inputs = np.stack([np.full_like(t, x_right), t], axis=1)
data_inputs = np.concatenate([ic_inputs, lb_inputs, rb_inputs], axis=0)
data_targets = tf.concat([initial_value(tf.constant(ic_inputs[:, 0:1])),
                          left_boundary(tf.constant(lb_inputs)),
                          right_boundary(tf.constant(rb_inputs))], axis=0)

# Train the model
model = SodPINN(num_hidden_layers, num_neurons_per_layer)
loss_history = train(model, inputs, data_inputs, data_targets,
                     learning_rate, num_epochs=1000, batch_size=100)
# Plot the training loss curve
plt.plot(loss_history)
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.show()
# Visualize the predicted fields over the whole (x, t) domain
preds = model(tf.constant(inputs)).numpy()
rho = preds[:, 0].reshape(num_points, num_points)
u = preds[:, 1].reshape(num_points, num_points)
p = preds[:, 2].reshape(num_points, num_points)
plt.figure(figsize=(10, 6))
plt.subplot(1, 3, 1)
plt.imshow(rho, extent=[x_left, x_right, 0, t_final], origin='lower', cmap='jet')
plt.colorbar()
plt.xlabel('x')
plt.ylabel('t')
plt.title('Density')
plt.subplot(1, 3, 2)
plt.imshow(u, extent=[x_left, x_right, 0, t_final], origin='lower', cmap='jet')
plt.colorbar()
plt.xlabel('x')
plt.ylabel('t')
plt.title('Velocity')
plt.subplot(1, 3, 3)
plt.imshow(p, extent=[x_left, x_right, 0, t_final], origin='lower', cmap='jet')
plt.colorbar()
plt.xlabel('x')
plt.ylabel('t')
plt.title('Pressure')
plt.tight_layout()
plt.show()
```
Note that the example above is only a simple PINN implementation and will usually need problem-specific modification and tuning (loss weighting, sampling strategy, network size). It can also be combined with other methods, such as Riemann solvers, to further improve or validate the solution; a sketch of such a comparison follows.
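As one illustration, here is a minimal sketch that overlays the trained network's density profile at t = t_final on the exact Riemann solution of the Sod problem. It continues the script above (reusing model, x_left, x_right, t_final, and the earlier imports). The helper name exact_sod_density and the hard-coded star-region values (p* ≈ 0.30313, u* ≈ 0.92745, rho*_L ≈ 0.42632, rho*_R ≈ 0.26557, shock speed ≈ 1.75216, the standard published values for Sod's data) are assumptions of this sketch, not part of the original code:

```python
def exact_sod_density(x, t, x0=0.5, gamma=1.4):
    # Exact Sod density using the standard star-region values instead of
    # solving the pressure equation numerically (assumed, see note above)
    p_star, u_star = 0.30313, 0.92745
    rho_star_L, rho_star_R = 0.42632, 0.26557
    shock_speed = 1.75216
    c_L = np.sqrt(gamma * 1.0 / 1.0)                 # left sound speed
    c_star_L = np.sqrt(gamma * p_star / rho_star_L)  # sound speed left of contact
    xi = (x - x0) / t                                # similarity variable
    rho = np.empty_like(xi)
    rho[xi <= -c_L] = 1.0                            # undisturbed left state
    fan = (xi > -c_L) & (xi <= u_star - c_star_L)    # rarefaction fan
    c_fan = (2.0 / (gamma + 1.0)) * (c_L - 0.5 * (gamma - 1.0) * xi[fan])
    rho[fan] = (c_fan / c_L) ** (2.0 / (gamma - 1.0))
    rho[(xi > u_star - c_star_L) & (xi <= u_star)] = rho_star_L  # fan tail to contact
    rho[(xi > u_star) & (xi <= shock_speed)] = rho_star_R        # contact to shock
    rho[xi > shock_speed] = 0.125                    # undisturbed right state
    return rho

# Overlay the PINN prediction on the exact profile at t = t_final
x_plot = np.linspace(x_left, x_right, 400, dtype=np.float32)
xt = np.stack([x_plot, np.full_like(x_plot, t_final)], axis=1)
rho_pinn = model(tf.constant(xt)).numpy()[:, 0]
plt.plot(x_plot, exact_sod_density(x_plot, t_final), 'k-', label='exact')
plt.plot(x_plot, rho_pinn, 'r--', label='PINN')
plt.xlabel('x')
plt.ylabel('rho')
plt.legend()
plt.show()
```

A plain PINN often smears the shock and contact discontinuity, so a comparison like this is a quick way to judge whether extra measures (adaptive sampling near the discontinuities, artificial-viscosity terms, loss reweighting) are needed.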