feat: cosmic gw multiday

This commit is contained in:
Dustella 2025-02-20 18:08:31 +08:00
parent 010f89c7d9
commit 7cf201ff8c
Signed by: Dustella
GPG Key ID: 35AA0AA3DC402D5C
4 changed files with 574 additions and 498 deletions

.gitignore vendored
View File

@@ -17,4 +17,5 @@ build
dist
*.spec
notebooks
passcode
data

View File

@@ -2,6 +2,7 @@ from io import BytesIO
from quart import Blueprint, request, send_file
from matplotlib import pyplot as plt
from modules.cosmic.gravityw_multiday import GravityMultidayPlot
from modules.cosmic.gravityw_perday import CosmicGravitywPlot
from modules.cosmic.planetw_daily import cosmic_planetw_daily_plot
from quart.utils import run_sync
@@ -47,3 +48,30 @@ async def single_render():
buf.seek(0)
return await send_file(buf, mimetype="image/png")
@cosmic_module.route("/render/gravity_wave/multiday")
async def multiday_render():
year = request.args.get("year", 2008)
start_day = request.args.get("start_day", 1)
end_day = request.args.get("end_day", 204)
mode = request.args.get("mode", "位温分布")
p: GravityMultidayPlot = await run_sync(GravityMultidayPlot)(year=int(year),
day_range=(int(start_day), int(end_day)))
if mode == "布伦特-维萨拉频率分布":
await run_sync(p.plot_heatmap_tempNz)()
elif mode == "位温分布":
await run_sync(p.plot_heatmap_tempPtz)()
elif mode == "每月浮力频率变化趋势":
await run_sync(p.plot_floatage_trend)()
elif mode == "每月平均重力势能的折线图":
await run_sync(p.plot_monthly_energy)()
else:
raise ValueError("Invalid mode")
buf = BytesIO()
plt.savefig(buf, format="png")
buf.seek(0)
return await send_file(buf, mimetype="image/png")
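# A minimal client-side sketch of calling the new endpoint (illustrative only: it
# assumes the service is reachable at localhost:5000 and that cosmic_module is
# mounted without a URL prefix; "requests" is a hypothetical client dependency,
# not part of this commit):
#
#     import requests
#
#     resp = requests.get(
#         "http://localhost:5000/render/gravity_wave/multiday",
#         params={"year": 2008, "start_day": 1, "end_day": 204, "mode": "位温分布"},
#     )
#     with open("gravity_multiday.png", "wb") as f:
#         f.write(resp.content)  # the handler streams a PNG rendered by matplotlib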

View File

@@ -1,6 +1,3 @@
# Everything in this module is gravity-wave related
# Original file name: cosmic重力波多天.py
import os
import numpy as np
import pandas as pd
@@ -9,297 +6,58 @@ from scipy.optimize import curve_fit
import netCDF4 as nc
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib.font_manager as fm
from CONSTANT import DATA_BASEPATH
from modules.cosmic.gravityw_multiday_process import process_single_file
DAY_RANGE = (0, 204)
# Use a font that supports Chinese characters
plt.rcParams['font.sans-serif'] = ['SimHei']  # use SimHei for CJK text
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly
fm.fontManager.addfont("./SimHei.ttf")
plt.rcParams['font.sans-serif'] = ['SimHei']  # family name must match the registered font
# Function that processes a single day's folder
# Main loop: process files 1 through 365
def process_single_file(base_folder_path, i):
# 构建当前文件夹的路径
folder_name = f"atmPrf_repro2021_2008_00{i}" if i < 10 else f"atmPrf_repro2021_2008_0{i}"
folder_path = os.path.join(base_folder_path, folder_name)
# 检查文件夹是否存在
if os.path.exists(folder_path):
dfs = []
# 遍历文件夹中的文件
for file_name in os.listdir(folder_path):
if file_name.endswith('.0390_nc'):
finfo = os.path.join(folder_path, file_name)
print(f"正在处理文件: {finfo}")
try:
dataset = nc.Dataset(finfo, 'r')
# 提取变量数据
temp = dataset.variables['Temp'][:]
altitude = dataset.variables['MSL_alt'][:]
lat = dataset.variables['Lat'][:]
lon = dataset.variables['Lon'][:]
# 创建DataFrame
df = pd.DataFrame({
'Longitude': lon,
'Latitude': lat,
'Altitude': altitude,
'Temperature': temp
})
dataset.close()
# 剔除高度大于60的行
df = df[df['Altitude'] <= 60]
# 对每个文件的数据进行插值
alt_interp = np.linspace(
df['Altitude'].min(), df['Altitude'].max(), 3000)
f_alt = interp1d(
df['Altitude'], df['Altitude'], kind='linear', fill_value="extrapolate")
f_lon = interp1d(
df['Altitude'], df['Longitude'], kind='linear', fill_value="extrapolate")
f_lat = interp1d(
df['Altitude'], df['Latitude'], kind='linear', fill_value="extrapolate")
f_temp = interp1d(
df['Altitude'], df['Temperature'], kind='linear', fill_value="extrapolate")
# 计算插值结果
interpolated_alt = f_alt(alt_interp)
interpolated_lon = f_lon(alt_interp)
interpolated_lat = f_lat(alt_interp)
interpolated_temp = f_temp(alt_interp)
# 创建插值后的DataFrame
interpolated_df = pd.DataFrame({
'Altitude': interpolated_alt,
'Longitude': interpolated_lon,
'Latitude': interpolated_lat,
'Temperature': interpolated_temp
})
# 将插值后的DataFrame添加到列表中
dfs.append(interpolated_df)
except Exception as e:
print(f"处理文件 {finfo} 时出错: {e}")
# 按行拼接所有插值后的DataFrame
final_df = pd.concat(dfs, axis=0, ignore_index=True)
# 获取 DataFrame 的长度
num_rows = len(final_df)
# 生成一个每3000个数从0到2999的序列并重复
altitude_values = np.tile(
np.arange(3000), num_rows // 3000 + 1)[:num_rows]
# 将生成的值赋给 DataFrame 的 'Altitude' 列
final_df['Altitude'] = altitude_values
# 摄氏度换算开尔文
final_df['Temperature'] = final_df['Temperature'] + 273.15
def get_multiday_data(year=2008, day_range=DAY_RANGE):
base_folder_path = f"{DATA_BASEPATH.cosmic}/{year}"
all_mean_ktemp_Nz = []
all_mean_ktemp_Ptz = []
day_begin, day_end = day_range
for file_index in range(day_begin, day_end):
try:
mean_ktemp_Nz, mean_ktemp_Ptz = process_single_file(
base_folder_path, file_index)
if mean_ktemp_Nz is not None and mean_ktemp_Ptz is not None:
all_mean_ktemp_Nz.append(mean_ktemp_Nz)
all_mean_ktemp_Ptz.append(mean_ktemp_Ptz)
except ValueError as e:
print(
f"Error processing file index {file_index}: {e}, skipping this file.")
continue
# 筛选出纬度在30到40度之间的数据
latitude_filtered_df = final_df[(
final_df['Latitude'] >= 30) & (final_df['Latitude'] <= 40)]
# 划分经度网格20°的网格
lon_min, lon_max = latitude_filtered_df['Longitude'].min(
), latitude_filtered_df['Longitude'].max()
lon_bins = np.arange(lon_min, lon_max + 20, 20) # 创建经度网格边界
# 将数据分配到网格中
latitude_filtered_df['Longitude_Grid'] = np.digitize(
latitude_filtered_df['Longitude'], lon_bins) - 1
# 对相同高度的温度取均值忽略NaN
altitude_temperature_mean = latitude_filtered_df.groupby(
'Altitude')['Temperature'].mean().reset_index()
# 重命名列,使其更具可读性
altitude_temperature_mean.columns = ['Altitude', 'Mean_Temperature']
# 定义高度的范围这里从0到最短段
altitude_range = range(0, 3000)
all_heights_mean_temperature = [] # 用于存储所有高度下的温度均值结果
for altitude in altitude_range:
# 筛选出当前高度的所有数据
altitude_df = latitude_filtered_df[latitude_filtered_df['Altitude'] == altitude]
# 对Longitude_Grid同一区间的温度取均值
temperature_mean_by_grid = altitude_df.groupby(
'Longitude_Grid')['Temperature'].mean().reset_index()
# 重命名列,使其更具可读性
temperature_mean_by_grid.columns = [
'Longitude_Grid', 'Mean_Temperature']
# 添加高度信息列,方便后续区分不同高度的结果
temperature_mean_by_grid['Altitude'] = altitude
# 将当前高度的结果添加到列表中
all_heights_mean_temperature.append(temperature_mean_by_grid)
# 将所有高度的结果合并为一个DataFrame
combined_mean_temperature_df = pd.concat(
all_heights_mean_temperature, ignore_index=True)
# 基于Altitude列合并两个DataFrame只保留能匹配上的行
merged_df = pd.merge(combined_mean_temperature_df,
altitude_temperature_mean, on='Altitude', how='inner')
# 计算差值减去wn0的扰动
merged_df['Temperature_Difference'] = merged_df['Mean_Temperature_x'] - \
merged_df['Mean_Temperature_y']
# 按Altitude分组
grouped = merged_df.groupby('Altitude')
def single_harmonic(x, A, phi):
return A * np.sin(2 * np.pi / (18 / k) * x + phi)
# 初始化存储每个高度的最佳拟合参数、拟合曲线、残差值以及背景温度的字典
fit_results = {}
fitted_curves = {}
residuals = {}
background_temperatures = {}
for altitude, group in grouped:
y_data = group['Temperature_Difference'].values
x_data = np.arange(len(y_data))
wn0_data = group['Mean_Temperature_y'].values # 获取同一高度下的wn0数据
# 检查Temperature_Difference列是否全部为NaN
if np.all(np.isnan(y_data)):
fit_results[altitude] = {
'A': [np.nan] * 5, 'phi': [np.nan] * 5}
fitted_curves[altitude] = [np.nan * x_data] * 5
residuals[altitude] = np.nan * x_data
background_temperatures[altitude] = np.nan * x_data
else:
# 替换NaN值为非NaN值的均值
y_data = np.where(np.isnan(y_data), np.nanmean(y_data), y_data)
# 初始化存储WN参数和曲线的列表
wn_params = []
wn_curves = []
# 计算wn0使用Mean_Temperature_y列数据
wn0 = wn0_data
# 对WN1至WN5进行拟合
for k in range(1, 6):
# 更新单谐波函数中的k值
def harmonic_func(
x, A, phi): return single_harmonic(x, A, phi)
# 使用curve_fit进行拟合
popt, pcov = curve_fit(harmonic_func, x_data, y_data, p0=[
np.nanmax(y_data) - np.nanmin(y_data), 0])
A_fit, phi_fit = popt
# 存储拟合结果
wn_params.append({'A': A_fit, 'phi': phi_fit})
# 使用拟合参数生成拟合曲线
WN = harmonic_func(x_data, A_fit, phi_fit)
wn_curves.append(WN)
# 计算残差值
y_data = y_data - WN # 使用残差值作为下一次拟合的y_data
# 存储结果
fit_results[altitude] = wn_params
fitted_curves[altitude] = wn_curves
residuals[altitude] = y_data
# 计算同一高度下的背景温度wn0 + wn1 + wn2 + wn3 + wn4 + wn5
wn_sum = np.sum([wn0] + wn_curves, axis=0)
background_temperatures[altitude] = wn_sum
# 将每个字典转换成一个 DataFrame
df = pd.DataFrame(residuals)
# 使用前向填充(用上一个有效值填充 NaN
df.ffill(axis=1, inplace=True)
# 初始化一个新的字典来保存处理结果
result = {}
# 定义滤波范围
lambda_low = 2 # 2 km
lambda_high = 15 # 15 km
f_low = 2 * np.pi / lambda_high
f_high = 2 * np.pi / lambda_low
# 循环处理df的每一行每个高度
for idx, residuals_array in df.iterrows():
# 提取有效值
valid_values = np.ma.masked_array(
residuals_array, np.isnan(residuals_array))
compressed_values = valid_values.compressed() # 去除NaN值后的数组
N = len(compressed_values) # 有效值的数量
# 如果有效值为空即所有值都是NaN则将结果设置为NaN
if N == 0:
result[idx] = np.full_like(residuals_array, np.nan)
else:
# 时间序列和频率
dt = 0.02 # 假设的时间间隔
n = np.arange(N)
f = n / (N * dt)
# 傅里叶变换
y = np.fft.fft(compressed_values)
# 频率滤波
yy = y.copy()
freq_filter = (f >= f_low) & (f <= f_high)
yy[~freq_filter] = 0
# 逆傅里叶变换
perturbation_after = np.real(np.fft.ifft(yy))
# 将处理结果插回到result字典中
result[idx] = perturbation_after
# 处理背景温度和扰动温度数据格式
heights = list(background_temperatures.keys())
data_length = len(next(iter(background_temperatures.values())))
background_matrix = np.zeros((data_length, len(heights)))
for idx, height in enumerate(heights):
background_matrix[:, idx] = background_temperatures[height]
heights = list(result.keys())
data_length = len(next(iter(result.values())))
perturbation_matrix = np.zeros((data_length, len(heights)))
for idx, height in enumerate(heights):
perturbation_matrix[:, idx] = result[height]
perturbation_matrix = perturbation_matrix.T
# 计算 Brunt-Väisälä 频率和势能
heights_for_calc = np.linspace(0, 60, 3000) * 1000
def brunt_vaisala_frequency(g, BT_z, c_p, heights):
# 计算位温随高度的变化率
dBT_z_dz = np.gradient(BT_z, heights)
# 计算 Brunt-Väisälä 频率,根号内取绝对值
frequency_squared = (g / BT_z) * ((g / c_p) + dBT_z_dz)
frequency = np.sqrt(np.abs(frequency_squared))
return frequency
# 计算势能
def calculate_gravitational_potential_energy(g, BT_z, N_z, PT_z):
# 计算势能
return 0.5 * ((g / N_z) ** 2) * ((PT_z / BT_z) ** 2)
g = 9.81 # 重力加速度
c_p = 1004.5 # 比热容
N_z_matrix = []
PT_z_matrix = []
for i in range(background_matrix.shape[0]):
BT_z = np.array(background_matrix[i])
PT_z = np.array(perturbation_matrix[i])
N_z = brunt_vaisala_frequency(g, BT_z, c_p, heights_for_calc)
PW = calculate_gravitational_potential_energy(g, BT_z, N_z, PT_z)
N_z_matrix.append(N_z)
PT_z_matrix.append(PW)
ktemp_Nz = np.vstack(N_z_matrix)
ktemp_Ptz = np.vstack(PT_z_matrix)
mean_ktemp_Nz = np.mean(ktemp_Nz, axis=0)
mean_ktemp_Ptz = np.mean(ktemp_Ptz, axis=0)
return mean_ktemp_Nz, mean_ktemp_Ptz
else:
print(f"文件夹 {folder_path} 不存在。")
return None, None
# 主循环处理1到3个文件
base_folder_path = r"./data/cosmic/2008"
all_mean_ktemp_Nz = []
all_mean_ktemp_Ptz = []
for file_index in range(1, 365):
mean_ktemp_Nz, mean_ktemp_Ptz = process_single_file(
base_folder_path, file_index)
if mean_ktemp_Nz is not None and mean_ktemp_Ptz is not None:
all_mean_ktemp_Nz.append(mean_ktemp_Nz)
all_mean_ktemp_Ptz.append(mean_ktemp_Ptz)
# 转换每个数组为二维形状
final_mean_ktemp_Nz = np.vstack([arr.reshape(1, -1)
for arr in all_mean_ktemp_Nz])
final_mean_ktemp_Ptz = np.vstack(
[arr.reshape(1, -1) for arr in all_mean_ktemp_Ptz])
# 使用条件索引替换大于50的值为NaN
final_mean_ktemp_Ptz[final_mean_ktemp_Ptz > 50] = np.nan
# heights 为每个高度的值
heights = np.linspace(0, 60, 3000)
df_final_mean_ktemp_Ptz = pd.DataFrame(final_mean_ktemp_Ptz)
df_final_mean_ktemp_Nz = pd.DataFrame(final_mean_ktemp_Nz)
# -------------------------------------------------绘制年统计图------------------------------------
# -----------绘制浮力频率年统计图-----------------------
data = df_final_mean_ktemp_Nz.T
# 对每个元素进行平方计算N2
data = data ** 2
data = data*10000 # (绘图好看)
# 将大于 10 的值替换为 NaN个别异常值
data[data > 10] = np.nan
# 转换每个数组为二维形状
final_mean_ktemp_Nz = np.vstack([arr.reshape(1, -1)
for arr in all_mean_ktemp_Nz])
final_mean_ktemp_Ptz = np.vstack(
[arr.reshape(1, -1) for arr in all_mean_ktemp_Ptz])
# 使用条件索引替换大于50的值为NaN
final_mean_ktemp_Ptz[final_mean_ktemp_Ptz > 50] = np.nan
# heights 为每个高度的值
heights = np.linspace(0, 60, 3000)
df_final_mean_ktemp_Ptz = pd.DataFrame(final_mean_ktemp_Ptz)
df_final_mean_ktemp_Nz = pd.DataFrame(final_mean_ktemp_Nz)
# -------------------------------------------------绘制年统计图------------------------------------
# -----------绘制浮力频率年统计图-----------------------
data = df_final_mean_ktemp_Nz.T
# 对每个元素进行平方计算N2
data = data ** 2
data = data*10000 # (绘图好看)
# 将大于 10 的值替换为 NaN个别异常值
data[data > 10] = np.nan
return df_final_mean_ktemp_Nz, df_final_mean_ktemp_Ptz, data, heights
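# Returned values, in order: the per-day mean N(z) profiles, the per-day mean
# potential-energy profiles, the transposed N^2 values scaled by 10^4 for plotting
# (heights x days, with outliers above 10 masked as NaN), and the 0-60 km height
# axis sampled at 3000 levels.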
# Heatmap plotting helper
@@ -324,218 +82,222 @@ def plot_heatmap(data, heights, title):
plt.show()
# Call the helper to draw the heatmap
plot_heatmap(data, heights, 'Heatmap of final_mean_ktemp_Nz10^(-4)')
# -----------------------------------------------------------------------------
# ---------- Yearly heatmap of gravity-wave potential energy ----------
data1 = df_final_mean_ktemp_Ptz.T
# Heatmap plotting helper
class GravityMultidayPlot:
def __init__(self, year, day_range):
self.year = year
df_final_mean_ktemp_Nz, df_final_mean_ktemp_Ptz, data, heights = get_multiday_data(
year, day_range)
self.df_final_mean_ktemp_Nz = df_final_mean_ktemp_Nz
self.df_final_mean_ktemp_Ptz = df_final_mean_ktemp_Ptz
self.data = data
self.data1 = df_final_mean_ktemp_Ptz.T
self.heights = heights
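# A minimal sketch of driving the class directly (this mirrors what the new
# /render/gravity_wave/multiday handler does; the day range is just an example):
#
#     p = GravityMultidayPlot(year=2008, day_range=(1, 204))
#     p.plot_heatmap_tempPtz()          # draws onto the current matplotlib figure
#     plt.savefig("gravity_multiday.png")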
def plot_heatmap(data, heights, title):
plt.figure(figsize=(10, 8))
# Draw the heatmap: rows are heights, columns are days
sns.heatmap(data, cmap='coolwarm', xticklabels=1,
yticklabels=50, cbar_kws={'label': 'Value'})
plt.xlabel('Day')
plt.ylabel('Height (km)')
plt.title(title)
# Put an x tick label every 30 days
num_days = data.shape[1]
x_tick_positions = np.arange(0, num_days, 30)
x_tick_labels = np.arange(0, num_days, 30)
plt.xticks(x_tick_positions, x_tick_labels)
# Label the y ticks with the corresponding heights
plt.yticks(np.linspace(
0, data.shape[0] - 1, 6), np.round(np.linspace(heights[0], heights[-1], 6), 2))
# Invert the y axis so 0 is at the bottom
plt.gca().invert_yaxis()
plt.show()
def plot_heatmap_tempNz(self):
# Draw the heatmap via the helper
data = self.data
plot_heatmap(data, self.heights,
'Heatmap of final_mean_ktemp_Nz10^(-4)')
def plot_heatmap_tempPtz(self):
# ---------- Yearly heatmap of gravity-wave potential energy ----------
data = self.df_final_mean_ktemp_Ptz.T
plot_heatmap(data, self.heights,
'Heatmap of final_mean_ktemp_Ptz(J/kg)')
# Draw the heatmap via the helper
plot_heatmap(data1, heights, 'Heatmap of final_mean_ktemp_Ptz(J/kg)')
# ---------- Monthly statistics plots ----------
# ---------- Monthly statistics of the buoyancy frequency ----------
# Total number of columns
num_columns = data.shape[1]
# 按30列分组计算均值
averaged_df = []
# 逐步处理每30列
for i in range(0, num_columns, 30):
# 获取当前范围内的列,并计算均值
subset = data.iloc[:, i:i+30] # 获取第i到i+29列
mean_values = subset.mean(axis=1) # 对每行计算均值
averaged_df.append(mean_values) # 将均值添加到列表
# 将结果转化为一个新的 DataFrame
averaged_df = pd.DataFrame(averaged_df).T
# 1. 每3000行取一个均值
# 获取总行数
num_rows = averaged_df.shape[0]
# 创建一个新的列表来存储每3000行的均值
averaged_by_rows_df = []
# 逐步处理每3000行
for i in range(0, num_rows, 3000):
# 获取当前范围内的行
subset = averaged_df.iloc[i:i+3000, :] # 获取第i到i+99行
mean_values = subset.mean(axis=0) # 对每列计算均值
averaged_by_rows_df.append(mean_values) # 将均值添加到列表
# 将结果转化为一个新的 DataFrame
averaged_by_rows_df = pd.DataFrame(averaged_by_rows_df)
# 绘制折线图
plt.figure(figsize=(10, 6)) # 设置图形的大小
plt.plot(averaged_by_rows_df.columns, averaged_by_rows_df.mean(
axis=0), marker='o', color='b', label='平均值')
# 添加标题和标签
plt.title('每月平均N^2的折线图')
plt.xlabel('月份')
plt.ylabel('N^2(10^-4)')
plt.legend()
# 显示图形
plt.grid(True)
plt.xticks(rotation=45) # 让x轴标签月份倾斜以便更清晰显示
plt.tight_layout()
plt.show()
# ---------- Monthly statistics of the gravity-wave potential energy ----------
# 获取总列数
num_columns = data1.shape[1]
# 按30列分组计算均值
averaged_df = []
# 逐步处理每30列
for i in range(0, num_columns, 30):
# 获取当前范围内的列,并计算均值
subset = data1.iloc[:, i:i+30] # 获取第i到i+29列
mean_values = subset.mean(axis=1) # 对每行计算均值
averaged_df.append(mean_values) # 将均值添加到列表
# 将结果转化为一个新的 DataFrame
averaged_df = pd.DataFrame(averaged_df).T
# 1. 每3000行取一个均值
# 获取总行数
num_rows = averaged_df.shape[0]
# 创建一个新的列表来存储每3000行的均值
averaged_by_rows_df = []
# 逐步处理每3000行
for i in range(0, num_rows, 3000):
# 获取当前范围内的行
subset = averaged_df.iloc[i:i+3000, :] # 获取第i到i+99行
mean_values = subset.mean(axis=0) # 对每列计算均值
averaged_by_rows_df.append(mean_values) # 将均值添加到列表
# 将结果转化为一个新的 DataFrame
averaged_by_rows_df = pd.DataFrame(averaged_by_rows_df)
# 绘制折线图
plt.figure(figsize=(10, 6)) # 设置图形的大小
plt.plot(averaged_by_rows_df.columns, averaged_by_rows_df.mean(
axis=0), marker='o', color='b', label='平均值')
# 添加标题和标签
plt.title('每月平均重力势能的折线图')
plt.xlabel('月份')
plt.ylabel('重力势能J/Kg')
plt.legend()
# 显示图形
plt.grid(True)
plt.xticks(rotation=45) # 让x轴标签月份倾斜以便更清晰显示
plt.tight_layout()
plt.show()
def plot_monthly_tempNz(self):
# ---------- Monthly statistics plots ----------
# ---------- Monthly statistics of the buoyancy frequency ----------
# Total number of columns
data = self.data
num_columns = data.shape[1]
# 按30列分组计算均值
averaged_df = []
# 逐步处理每30列
for i in range(0, num_columns, 30):
# 获取当前范围内的列,并计算均值
subset = data.iloc[:, i:i+30] # 获取第i到i+29列
mean_values = subset.mean(axis=1) # 对每行计算均值
averaged_df.append(mean_values) # 将均值添加到列表
# 将结果转化为一个新的 DataFrame
averaged_df = pd.DataFrame(averaged_df).T
# 1. 每3000行取一个均值
# 获取总行数
num_rows = averaged_df.shape[0]
# 创建一个新的列表来存储每3000行的均值
averaged_by_rows_df = []
# 逐步处理每3000行
for i in range(0, num_rows, 3000):
# 获取当前范围内的行
subset = averaged_df.iloc[i:i+3000, :] # 获取第i到i+99行
mean_values = subset.mean(axis=0) # 对每列计算均值
averaged_by_rows_df.append(mean_values) # 将均值添加到列表
# 将结果转化为一个新的 DataFrame
averaged_by_rows_df = pd.DataFrame(averaged_by_rows_df)
# 绘制折线图
plt.figure(figsize=(10, 6)) # 设置图形的大小
plt.plot(averaged_by_rows_df.columns, averaged_by_rows_df.mean(
axis=0), marker='o', color='b', label='平均值')
# 添加标题和标签
plt.title('每月平均N^2的折线图')
plt.xlabel('月份')
plt.ylabel('N^2(10^-4)')
plt.legend()
# 显示图形
plt.grid(True)
plt.xticks(rotation=45) # 让x轴标签月份倾斜以便更清晰显示
plt.tight_layout()
plt.show()
def plot_monthly_energy(self):
data1 = self.data1
# 获取总列数
total_columns = data.shape[1]
# 用于存储每一组30列计算得到的均值列数据最终会构成新的DataFrame
mean_columns = []
# 分组序号用于生成列名时区分不同的均值列从1开始
group_index = 1
# 按照每30列一组进行划分不滑动
for start_col in range(0, total_columns, 30):
end_col = start_col + 30
if end_col > total_columns:
end_col = total_columns
# 选取当前组的30列如果不足30列按实际剩余列数选取
group_data = data.iloc[:, start_col:end_col]
# 按行对当前组的列数据求和
sum_per_row = group_data.sum(axis=1)
# 计算平均(每一组的平均,每行都有一个平均结果)
mean_per_row = sum_per_row / (end_col - start_col)
# 生成新的列名,格式为'Mean_分组序号',例如'Mean_1'、'Mean_2'等
new_column_name = f'Mean_{group_index}'
group_index += 1
# 将当前组计算得到的均值列添加到列表中
mean_columns.append(mean_per_row)
# 将所有的均值列合并为一个新的DataFrame列方向合并
new_mean_df = pd.concat(mean_columns, axis=1)
# 按行对new_mean_df所有列的数据进行求和得到一个Series索引与new_mean_df的索引一致每个元素是每行的总和
row_sums = new_mean_df.sum(axis=1)
# 计算所有行总和的均值
mean_value = row_sums.mean()
# 设置中文字体为黑体解决中文显示问题Windows系统下如果是其他系统或者有其他字体需求可适当调整
plt.rcParams['font.sans-serif'] = ['SimHei']
# 解决负号显示问题
plt.rcParams['axes.unicode_minus'] = False
# 提取月份作为x轴标签假设mean_value的索引就是月份信息
months = mean_value.index.tolist()
# 提取均值数据作为y轴数据
energy_values = mean_value.tolist()
# 创建图形和坐标轴对象
fig, ax = plt.subplots(figsize=(10, 6))
# 绘制折线图
ax.plot(months, energy_values, marker='o', linestyle='-', color='b')
# 设置坐标轴标签和标题
ax.set_xlabel('月份')
ax.set_ylabel('平均浮力频率')
ax.set_title('每月浮力频率变化趋势')
# 设置x轴刻度让其旋转一定角度以便更好地显示所有月份标签避免重叠
plt.xticks(rotation=45)
# 显示网格线,增强图表可读性
ax.grid(True)
# 显示图形
plt.show()
# ---------- Monthly statistics of the gravity-wave potential energy ----------
# 获取总列数
total_columns = data1.shape[1]
# 用于存储每一组30列计算得到的均值列数据最终会构成新的DataFrame
mean_columns = []
# 分组序号用于生成列名时区分不同的均值列从1开始
group_index = 1
# 按照每30列一组进行划分不滑动
for start_col in range(0, total_columns, 30):
end_col = start_col + 30
if end_col > total_columns:
end_col = total_columns
# 选取当前组的30列如果不足30列按实际剩余列数选取
group_data = data1.iloc[:, start_col:end_col]
# 按行对当前组的列数据求和
sum_per_row = group_data.sum(axis=1)
# 计算平均(每一组的平均,每行都有一个平均结果)
mean_per_row = sum_per_row / (end_col - start_col)
# 生成新的列名,格式为'Mean_分组序号',例如'Mean_1'、'Mean_2'等
new_column_name = f'Mean_{group_index}'
group_index += 1
# 将当前组计算得到的均值列添加到列表中
mean_columns.append(mean_per_row)
# 将所有的均值列合并为一个新的DataFrame列方向合并
new_mean_df = pd.concat(mean_columns, axis=1)
# 按行对new_mean_df所有列的数据进行求和得到一个Series索引与new_mean_df的索引一致每个元素是每行的总和
row_sums = new_mean_df.sum(axis=1)
# 计算所有行总和的均值
mean_value = row_sums.mean()
# 设置中文字体为黑体解决中文显示问题Windows系统下如果是其他系统或者有其他字体需求可适当调整
plt.rcParams['font.sans-serif'] = ['SimHei']
# 解决负号显示问题
plt.rcParams['axes.unicode_minus'] = False
# 提取月份作为x轴标签假设mean_value的索引就是月份信息
months = mean_value.index.tolist()
# 提取均值数据作为y轴数据
energy_values = mean_value.tolist()
# 创建图形和坐标轴对象
fig, ax = plt.subplots(figsize=(10, 6))
# 绘制折线图
ax.plot(months, energy_values, marker='o', linestyle='-', color='b')
# 设置坐标轴标签和标题
ax.set_xlabel('月份')
ax.set_ylabel('平均浮力频率')
ax.set_title('每月浮力频率变化趋势')
# 设置x轴刻度让其旋转一定角度以便更好地显示所有月份标签避免重叠
plt.xticks(rotation=45)
# 显示网格线,增强图表可读性
ax.grid(True)
# 显示图形
plt.show()
# ---------- Monthly statistics of the gravity-wave potential energy ----------
# 获取总列数
num_columns = data1.shape[1]
# 按30列分组计算均值
averaged_df = []
# 逐步处理每30列
for i in range(0, num_columns, 30):
# 获取当前范围内的列,并计算均值
subset = data1.iloc[:, i:i+30] # 获取第i到i+29列
mean_values = subset.mean(axis=1) # 对每行计算均值
averaged_df.append(mean_values) # 将均值添加到列表
# 将结果转化为一个新的 DataFrame
averaged_df = pd.DataFrame(averaged_df).T
# 1. 每3000行取一个均值
# 获取总行数
num_rows = averaged_df.shape[0]
# 创建一个新的列表来存储每3000行的均值
averaged_by_rows_df = []
# 逐步处理每3000行
for i in range(0, num_rows, 3000):
# 获取当前范围内的行
subset = averaged_df.iloc[i:i+3000, :] # 获取第i到i+99行
mean_values = subset.mean(axis=0) # 对每列计算均值
averaged_by_rows_df.append(mean_values) # 将均值添加到列表
# 将结果转化为一个新的 DataFrame
averaged_by_rows_df = pd.DataFrame(averaged_by_rows_df)
# 绘制折线图
plt.figure(figsize=(10, 6)) # 设置图形的大小
plt.plot(averaged_by_rows_df.columns, averaged_by_rows_df.mean(
axis=0), marker='o', color='b', label='平均值')
# 添加标题和标签
plt.title('每月平均重力势能的折线图')
plt.xlabel('月份')
plt.ylabel('重力势能J/Kg')
plt.legend()
# 显示图形
plt.grid(True)
plt.xticks(rotation=45) # 让x轴标签月份倾斜以便更清晰显示
plt.tight_layout()
plt.show()
def plot_floatage_trend(self):
data = self.data
# 获取总列数
total_columns = data.shape[1]
# 用于存储每一组30列计算得到的均值列数据最终会构成新的DataFrame
mean_columns = []
# 分组序号用于生成列名时区分不同的均值列从1开始
group_index = 1
# 按照每30列一组进行划分不滑动
for start_col in range(0, total_columns, 30):
end_col = start_col + 30
if end_col > total_columns:
end_col = total_columns
# 选取当前组的30列如果不足30列按实际剩余列数选取
group_data = data.iloc[:, start_col:end_col]
# 按行对当前组的列数据求和
sum_per_row = group_data.sum(axis=1)
# 计算平均(每一组的平均,每行都有一个平均结果)
mean_per_row = sum_per_row / (end_col - start_col)
# 生成新的列名,格式为'Mean_分组序号',例如'Mean_1'、'Mean_2'等
new_column_name = f'Mean_{group_index}'
group_index += 1
# 将当前组计算得到的均值列添加到列表中
mean_columns.append(mean_per_row)
# 将所有的均值列合并为一个新的DataFrame列方向合并
new_mean_df = pd.concat(mean_columns, axis=1)
# 按行对new_mean_df所有列的数据进行求和得到一个Series索引与new_mean_df的索引一致每个元素是每行的总和
row_sums = new_mean_df.sum(axis=1)
# 计算所有行总和的均值
mean_value = row_sums.mean()
# 设置中文字体为黑体解决中文显示问题Windows系统下如果是其他系统或者有其他字体需求可适当调整
plt.rcParams['font.sans-serif'] = ['SimHei']
# 解决负号显示问题
plt.rcParams['axes.unicode_minus'] = False
# 提取月份作为x轴标签假设mean_value的索引就是月份信息
print(mean_value)
months = mean_value.index.tolist()
# 提取均值数据作为y轴数据
energy_values = mean_value.tolist()
# 创建图形和坐标轴对象
fig, ax = plt.subplots(figsize=(10, 6))
# 绘制折线图
ax.plot(months, energy_values, marker='o', linestyle='-', color='b')
# 设置坐标轴标签和标题
ax.set_xlabel('月份')
ax.set_ylabel('平均浮力频率')
ax.set_title('每月浮力频率变化趋势')
# 设置x轴刻度让其旋转一定角度以便更好地显示所有月份标签避免重叠
plt.xticks(rotation=45)
# 显示网格线,增强图表可读性
ax.grid(True)
# 显示图形
plt.show()
def plot_floatage_trend(self):
data1 = self.data1
# ---------- Monthly statistics of the gravity-wave potential energy ----------
# 获取总列数
total_columns = data1.shape[1]
# 用于存储每一组30列计算得到的均值列数据最终会构成新的DataFrame
mean_columns = []
# 分组序号用于生成列名时区分不同的均值列从1开始
group_index = 1
# 按照每30列一组进行划分不滑动
for start_col in range(0, total_columns, 30):
end_col = start_col + 30
if end_col > total_columns:
end_col = total_columns
# 选取当前组的30列如果不足30列按实际剩余列数选取
group_data = data1.iloc[:, start_col:end_col]
# 按行对当前组的列数据求和
sum_per_row = group_data.sum(axis=1)
# 计算平均(每一组的平均,每行都有一个平均结果)
mean_per_row = sum_per_row / (end_col - start_col)
# 生成新的列名,格式为'Mean_分组序号',例如'Mean_1'、'Mean_2'等
new_column_name = f'Mean_{group_index}'
group_index += 1
# 将当前组计算得到的均值列添加到列表中
mean_columns.append(mean_per_row)
# 将所有的均值列合并为一个新的DataFrame列方向合并
new_mean_df = pd.concat(mean_columns, axis=1)
# 按行对new_mean_df所有列的数据进行求和得到一个Series索引与new_mean_df的索引一致每个元素是每行的总和
row_sums = new_mean_df.sum(axis=1)
# 计算所有行总和的均值
mean_value = row_sums.mean()
# 设置中文字体为黑体解决中文显示问题Windows系统下如果是其他系统或者有其他字体需求可适当调整
plt.rcParams['font.sans-serif'] = ['SimHei']
# 解决负号显示问题
plt.rcParams['axes.unicode_minus'] = False
# 提取月份作为x轴标签假设mean_value的索引就是月份信息
months = mean_value.index.tolist()
# 提取均值数据作为y轴数据
energy_values = mean_value.tolist()
# 创建图形和坐标轴对象
fig, ax = plt.subplots(figsize=(10, 6))
# 绘制折线图
ax.plot(months, energy_values, marker='o', linestyle='-', color='b')
# 设置坐标轴标签和标题
ax.set_xlabel('月份')
ax.set_ylabel('平均浮力频率')
ax.set_title('每月浮力频率变化趋势')
# 设置x轴刻度让其旋转一定角度以便更好地显示所有月份标签避免重叠
plt.xticks(rotation=45)
# 显示网格线,增强图表可读性
ax.grid(True)
# 显示图形
plt.show()

View File

@@ -0,0 +1,285 @@
import os
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
from scipy.optimize import curve_fit
import netCDF4 as nc
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib.font_manager as fm
def process_single_file(base_folder_path, i):
# Build the path of the current day's folder
if i < 10:
folder_name = f"atmPrf_repro2021_2008_00{i}"  # pad one-digit day numbers with two zeros
elif i < 100:
folder_name = f"atmPrf_repro2021_2008_0{i}"  # pad two-digit day numbers with one zero
else:
folder_name = f"atmPrf_repro2021_2008_{i}"  # three-digit day numbers need no padding
folder_path = os.path.join(base_folder_path, folder_name)
# i should be day
cache_path_nz = f"{base_folder_path}/{folder_name}_mean_ktemp_Nz.npy"
cache_path_ptz = f"{base_folder_path}/{folder_name}_mean_ktemp_Ptz.npy"
if os.path.exists(cache_path_nz) and os.path.exists(cache_path_ptz):
mean_ktemp_Nz = np.load(cache_path_nz)
mean_ktemp_Ptz = np.load(cache_path_ptz)
print(f"Loaded cache for {folder_name}")
return mean_ktemp_Nz, mean_ktemp_Ptz
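# Note: the two .npy files above act as a per-day cache keyed only on the folder
# name; there is no invalidation logic in this module, so stale results have to be
# cleared by deleting the cached files by hand.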
# Check whether the folder exists
if os.path.exists(folder_path):
dfs = []
# Iterate over the files in the folder
for file_name in os.listdir(folder_path):
if file_name.endswith('.0390_nc'):
finfo = os.path.join(folder_path, file_name)
# print(f"正在处理文件: {finfo}")
try:
dataset = nc.Dataset(finfo, 'r')
# Extract the variables
temp = dataset.variables['Temp'][:]
altitude = dataset.variables['MSL_alt'][:]
lat = dataset.variables['Lat'][:]
lon = dataset.variables['Lon'][:]
# Build a DataFrame
df = pd.DataFrame({
'Longitude': lon,
'Latitude': lat,
'Altitude': altitude,
'Temperature': temp
})
dataset.close()
# Drop rows above 60 km altitude
df = df[df['Altitude'] <= 60]
# Interpolate each file's profile onto a common grid
alt_interp = np.linspace(
df['Altitude'].min(), df['Altitude'].max(), 3000)
f_alt = interp1d(
df['Altitude'], df['Altitude'], kind='linear', fill_value="extrapolate")
f_lon = interp1d(
df['Altitude'], df['Longitude'], kind='linear', fill_value="extrapolate")
f_lat = interp1d(
df['Altitude'], df['Latitude'], kind='linear', fill_value="extrapolate")
f_temp = interp1d(
df['Altitude'], df['Temperature'], kind='linear', fill_value="extrapolate")
# Evaluate the interpolants
interpolated_alt = f_alt(alt_interp)
interpolated_lon = f_lon(alt_interp)
interpolated_lat = f_lat(alt_interp)
interpolated_temp = f_temp(alt_interp)
# Build the interpolated DataFrame
interpolated_df = pd.DataFrame({
'Altitude': interpolated_alt,
'Longitude': interpolated_lon,
'Latitude': interpolated_lat,
'Temperature': interpolated_temp
})
# Append the interpolated DataFrame to the list
dfs.append(interpolated_df)
except Exception as e:
print(f"处理文件 {finfo} 时出错: {e}")
# Concatenate all interpolated DataFrames row-wise
final_df = pd.concat(dfs, axis=0, ignore_index=True)
# Number of rows in the DataFrame
num_rows = len(final_df)
# Build a repeating 0..2999 sequence, one block of 3000 per profile
altitude_values = np.tile(
np.arange(3000), num_rows // 3000 + 1)[:num_rows]
# Assign the generated level indices to the 'Altitude' column
final_df['Altitude'] = altitude_values
# Convert Celsius to Kelvin
final_df['Temperature'] = final_df['Temperature'] + 273.15
# Keep only data with latitude between 30 and 40 degrees
latitude_filtered_df = final_df[(
final_df['Latitude'] >= 30) & (final_df['Latitude'] <= 40)]
# Split the longitudes into a 20-degree grid
lon_min, lon_max = latitude_filtered_df['Longitude'].min(
), latitude_filtered_df['Longitude'].max()
lon_bins = np.arange(lon_min, lon_max + 20, 20)  # longitude bin edges
# Assign each sample to a grid cell
# latitude_filtered_df['Longitude_Grid'] = np.digitize(
# latitude_filtered_df['Longitude'], lon_bins) - 1
latitude_filtered_df.loc[:, 'Longitude_Grid'] = np.digitize(
latitude_filtered_df['Longitude'], lon_bins) - 1
# Mean temperature at each altitude level (NaNs ignored)
altitude_temperature_mean = latitude_filtered_df.groupby(
'Altitude')['Temperature'].mean().reset_index()
# Rename the columns for readability
altitude_temperature_mean.columns = ['Altitude', 'Mean_Temperature']
# Altitude level range (0 up to the shortest segment)
altitude_range = range(0, 3000)
all_heights_mean_temperature = []  # mean-temperature results for every level
for altitude in altitude_range:
# Select all data at the current level
altitude_df = latitude_filtered_df[latitude_filtered_df['Altitude'] == altitude]
# Mean temperature within each Longitude_Grid cell
temperature_mean_by_grid = altitude_df.groupby(
'Longitude_Grid')['Temperature'].mean().reset_index()
# Rename the columns for readability
temperature_mean_by_grid.columns = [
'Longitude_Grid', 'Mean_Temperature']
# Add the altitude so the levels can be told apart later
temperature_mean_by_grid['Altitude'] = altitude
# Collect the result for this level
all_heights_mean_temperature.append(temperature_mean_by_grid)
# Combine the per-level results into one DataFrame
combined_mean_temperature_df = pd.concat(
all_heights_mean_temperature, ignore_index=True)
# Merge on Altitude, keeping only rows present in both DataFrames
merged_df = pd.merge(combined_mean_temperature_df,
altitude_temperature_mean, on='Altitude', how='inner')
# Difference from the zonal mean (subtract the wn0 component)
merged_df['Temperature_Difference'] = merged_df['Mean_Temperature_x'] - \
merged_df['Mean_Temperature_y']
# Group by Altitude
grouped = merged_df.groupby('Altitude')
def single_harmonic(x, A, phi):
return A * np.sin(2 * np.pi / (18 / k) * x + phi)
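# How the fit below is meant to read: the zonal temperature perturbation at each
# level is decomposed into planetary wavenumbers k = 1..5,
#     T'(x) ≈ Σ_k A_k · sin(2πx / (18/k) + φ_k),
# where x indexes the 20° longitude cells (18 cells span 360°). single_harmonic
# picks up k from the enclosing scope, so each pass of the k-loop fits one
# wavenumber and subtracts it before fitting the next.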
# Dictionaries holding, per altitude, the best-fit parameters, fitted curves, residuals and background temperature
fit_results = {}
fitted_curves = {}
residuals = {}
background_temperatures = {}
for altitude, group in grouped:
y_data = group['Temperature_Difference'].values
x_data = np.arange(len(y_data))
wn0_data = group['Mean_Temperature_y'].values  # wn0 data for this altitude
# Check whether Temperature_Difference is all NaN at this altitude
if np.all(np.isnan(y_data)):
fit_results[altitude] = {
'A': [np.nan] * 5, 'phi': [np.nan] * 5}
fitted_curves[altitude] = [np.nan * x_data] * 5
residuals[altitude] = np.nan * x_data
background_temperatures[altitude] = np.nan * x_data
else:
# Replace NaNs with the mean of the non-NaN values
y_data = np.where(np.isnan(y_data), np.nanmean(y_data), y_data)
# Lists for the fitted wavenumber parameters and curves
wn_params = []
wn_curves = []
# wn0 is taken from the Mean_Temperature_y column
wn0 = wn0_data
# Fit wavenumbers WN1 through WN5
for k in range(1, 6):
# Wrapper that picks up the current k for the single-harmonic model
def harmonic_func(
x, A, phi): return single_harmonic(x, A, phi)
# Fit with curve_fit
popt, pcov = curve_fit(harmonic_func, x_data, y_data, p0=[
np.nanmax(y_data) - np.nanmin(y_data), 0])
A_fit, phi_fit = popt
# Store the fit parameters
wn_params.append({'A': A_fit, 'phi': phi_fit})
# Evaluate the fitted curve
WN = harmonic_func(x_data, A_fit, phi_fit)
wn_curves.append(WN)
# Subtract the fit; the residual becomes the y_data for the next wavenumber
y_data = y_data - WN
# Store the per-altitude results
fit_results[altitude] = wn_params
fitted_curves[altitude] = wn_curves
residuals[altitude] = y_data
# Background temperature at this altitude: wn0 + wn1 + ... + wn5
wn_sum = np.sum([wn0] + wn_curves, axis=0)
background_temperatures[altitude] = wn_sum
# Put the residuals dictionary into a DataFrame
df = pd.DataFrame(residuals)
# Forward-fill: replace NaNs with the previous valid value
df.ffill(axis=1, inplace=True)
# Dictionary for the filtered results
result = {}
# Band-pass limits for the vertical filtering
lambda_low = 2  # 2 km
lambda_high = 15  # 15 km
f_low = 2 * np.pi / lambda_high
f_high = 2 * np.pi / lambda_low
# Loop over each row of df (each altitude)
for idx, residuals_array in df.iterrows():
# Extract the valid values
valid_values = np.ma.masked_array(
residuals_array, np.isnan(residuals_array))
compressed_values = valid_values.compressed()  # array with the NaNs removed
N = len(compressed_values)  # number of valid values
# If there are no valid values (all NaN), store NaN as the result
if N == 0:
result[idx] = np.full_like(residuals_array, np.nan)
else:
# Sample index and frequency axis
dt = 0.02  # assumed sample spacing
n = np.arange(N)
f = n / (N * dt)
# Fourier transform
y = np.fft.fft(compressed_values)
# Frequency-domain filtering
yy = y.copy()
freq_filter = (f >= f_low) & (f <= f_high)
yy[~freq_filter] = 0
# Inverse Fourier transform
perturbation_after = np.real(np.fft.ifft(yy))
# Put the filtered series back into the result dictionary
result[idx] = perturbation_after
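# Reading of the constants above (an interpretation of the code, not documented
# elsewhere in this commit): dt = 0.02 matches the 0.02 km spacing of the
# interpolated profiles (60 km over 3000 levels), and lambda_low/lambda_high keep
# vertical wavelengths in the 2-15 km band typically associated with gravity waves.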
# Arrange the background and perturbation temperatures into matrices
heights = list(background_temperatures.keys())
data_length = len(next(iter(background_temperatures.values())))
background_matrix = np.zeros((data_length, len(heights)))
for idx, height in enumerate(heights):
background_matrix[:, idx] = background_temperatures[height]
heights = list(result.keys())
data_length = len(next(iter(result.values())))
perturbation_matrix = np.zeros((data_length, len(heights)))
for idx, height in enumerate(heights):
perturbation_matrix[:, idx] = result[height]
perturbation_matrix = perturbation_matrix.T
# Compute the Brunt-Väisälä frequency and the potential energy
heights_for_calc = np.linspace(0, 60, 3000) * 1000
def brunt_vaisala_frequency(g, BT_z, c_p, heights):
# Rate of change of the background temperature with height
dBT_z_dz = np.gradient(BT_z, heights)
# Brunt-Väisälä frequency; take the absolute value inside the square root
frequency_squared = (g / BT_z) * ((g / c_p) + dBT_z_dz)
frequency = np.sqrt(np.abs(frequency_squared))
return frequency
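# The expression above is the temperature form of the squared Brunt-Väisälä
# frequency, N^2(z) = (g / T) * (g / c_p + dT/dz), evaluated on the background
# temperature profile; the absolute value keeps the square root defined where
# N^2 comes out negative.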
# Potential energy
def calculate_gravitational_potential_energy(g, BT_z, N_z, PT_z):
# Gravity-wave potential energy per unit mass
return 0.5 * ((g / N_z) ** 2) * ((PT_z / BT_z) ** 2)
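# This is the usual gravity-wave potential energy per unit mass,
#     Ep = (1/2) * (g / N)^2 * (T' / T_bar)^2,
# with T' the filtered perturbation and T_bar the background temperature, which is
# why the later heatmap labels the values in J/kg.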
g = 9.81  # gravitational acceleration (m/s^2)
c_p = 1004.5  # specific heat of dry air at constant pressure (J/(kg*K))
N_z_matrix = []
PT_z_matrix = []
for i in range(background_matrix.shape[0]):
BT_z = np.array(background_matrix[i])
PT_z = np.array(perturbation_matrix[i])
N_z = brunt_vaisala_frequency(g, BT_z, c_p, heights_for_calc)
PW = calculate_gravitational_potential_energy(g, BT_z, N_z, PT_z)
N_z_matrix.append(N_z)
PT_z_matrix.append(PW)
ktemp_Nz = np.vstack(N_z_matrix)
ktemp_Ptz = np.vstack(PT_z_matrix)
mean_ktemp_Nz = np.mean(ktemp_Nz, axis=0)
mean_ktemp_Ptz = np.mean(ktemp_Ptz, axis=0)
# save to cache
np.save(cache_path_nz, mean_ktemp_Nz)
np.save(cache_path_ptz, mean_ktemp_Ptz)
return mean_ktemp_Nz, mean_ktemp_Ptz
else:
print(f"文件夹 {folder_path} 不存在。")
return None, None
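# Minimal manual driver for this module (a sketch; get_multiday_data in
# modules.cosmic.gravityw_multiday calls process_single_file the same way, and the
# base path below is illustrative):
if __name__ == "__main__":
    base = "./data/cosmic/2008"
    profiles_nz, profiles_ptz = [], []
    for day in range(1, 205):
        nz, ptz = process_single_file(base, day)
        if nz is not None and ptz is not None:
            profiles_nz.append(nz)    # 3000-level mean N(z) profile for this day
            profiles_ptz.append(ptz)  # 3000-level mean Ep(z) profile for this day
    print(f"processed {len(profiles_nz)} days")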