renaming all modules to right
parent f09f8d415b
commit 6565fccd25
@@ -1,3 +1,6 @@
# 探空气球只有重力波

import base64
from urllib import response
from quart import Blueprint

@@ -1,3 +1,4 @@
# 探空气球只有重力波
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

@@ -1,3 +1,5 @@
# 探空气球只有重力波

from dataclasses import dataclass
from io import BytesIO
import matplotlib.pyplot as plt

@@ -1,3 +1,5 @@
# 探空气球只有重力波

import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import numpy as np
@@ -11,10 +13,12 @@ def plot_year(data: pd.DataFrame, path: str, lat: float, g: float):
return False

data.loc[:, "date"] = data["file_name"].str[:15]
filtered_df = data[["date"] + [col for col in data.columns if col != "file_name" and col != "date"]]
filtered_df = data[[
"date"] + [col for col in data.columns if col != "file_name" and col != "date"]]
filtered_df.reset_index(drop=True, inplace=True)

filtered_df = filtered_df.drop_duplicates(subset="date", keep="last") # 使用 drop_duplicates 函数,保留每组重复中的最后一个记录
filtered_df = filtered_df.drop_duplicates(
subset="date", keep="last") # 使用 drop_duplicates 函数,保留每组重复中的最后一个记录
filtered_df.reset_index(drop=True, inplace=True)

filtered_df = filtered_df[filtered_df["w_f"] < 10] # 筛选 w_f 值小于 10 的数据
@@ -22,7 +26,8 @@ def plot_year(data: pd.DataFrame, path: str, lat: float, g: float):
# todo:1-删除不合理的数据
# 1.先剔除明显异常的值
for column in filtered_df.columns[1:]: # 不考虑第一列日期列
filtered_df = filtered_df[(filtered_df[column] >= -9999) & (filtered_df[column] <= 9999)] # 460
filtered_df = filtered_df[(
filtered_df[column] >= -9999) & (filtered_df[column] <= 9999)] # 460

# 2.再用四分位数法,适合所有数据集
def remove_outliers_iqr(df):
@@ -37,7 +42,7 @@ def plot_year(data: pd.DataFrame, path: str, lat: float, g: float):
df = df[(df[column] >= lower_bound) & (df[column] <= upper_bound)]
return df

filtered_df = remove_outliers_iqr(filtered_df) # 408
filtered_df = remove_outliers_iqr(filtered_df) # 408

# 画图
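
[Editor's note] The body of remove_outliers_iqr is only partially visible across these hunks. For reference, a minimal self-contained sketch of an IQR filter of this shape (the 1.5 multiplier and the choice to skip the first date column are assumptions, not read from the diff) might look like:

import pandas as pd

def remove_outliers_iqr(df: pd.DataFrame) -> pd.DataFrame:
    # Hypothetical sketch: keep rows inside [Q1 - 1.5*IQR, Q3 + 1.5*IQR] for every column
    for column in df.columns[1:]:  # assume the first column is the date column, as in plot_year
        q1 = df[column].quantile(0.25)
        q3 = df[column].quantile(0.75)
        iqr = q3 - q1
        lower_bound = q1 - 1.5 * iqr
        upper_bound = q3 + 1.5 * iqr
        df = df[(df[column] >= lower_bound) & (df[column] <= upper_bound)]
    return df
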
@@ -63,7 +68,7 @@ def plot_year(data: pd.DataFrame, path: str, lat: float, g: float):

ax10 = []
for i in range(0, 10, 3):
ax10.append(fig.add_subplot(gs[3, i : i + 3], projection="windrose"))
ax10.append(fig.add_subplot(gs[3, i: i + 3], projection="windrose"))

sns.set_theme(style="whitegrid", font="SimHei") # 设置绘图样式为白色背景和网格线

@@ -75,7 +80,8 @@ def plot_year(data: pd.DataFrame, path: str, lat: float, g: float):
# 创建bins的边界
bins = np.arange(min_val, max_val + bin_width, bin_width)

sns.histplot(filtered_df["w_f"], bins=bins, kde=False, edgecolor="black", stat="percent", ax=ax1) # 加上stat='percent'表示算的是频率
sns.histplot(filtered_df["w_f"], bins=bins, kde=False, edgecolor="black",
stat="percent", ax=ax1) # 加上stat='percent'表示算的是频率
# 设置x轴范围
ax1.set_xlim(min_val, max_val)
# 添加标题和标签
@@ -96,7 +102,8 @@ def plot_year(data: pd.DataFrame, path: str, lat: float, g: float):
bin_width = 1
bins = np.arange(min_val, max_val + bin_width, bin_width)

sns.histplot(filtered_df["zhou_qi"], bins=bins, kde=False, edgecolor="black", stat="percent", ax=ax2) # 加上stat='percent'表示算的是频率
sns.histplot(filtered_df["zhou_qi"], bins=bins, kde=False,
edgecolor="black", stat="percent", ax=ax2) # 加上stat='percent'表示算的是频率
# 设置x轴范围
ax2.set_xlim(min_val, max_val)
# 添加标题和标签
@@ -111,7 +118,8 @@ def plot_year(data: pd.DataFrame, path: str, lat: float, g: float):
bin_width = 0.5
bins = np.arange(min_val, max_val + bin_width, bin_width)

sns.histplot(filtered_df["ver_wave_len"], bins=bins, kde=False, edgecolor="black", stat="percent", ax=ax3) # 加上stat='percent'表示算的是频率
sns.histplot(filtered_df["ver_wave_len"], bins=bins, kde=False,
edgecolor="black", stat="percent", ax=ax3) # 加上stat='percent'表示算的是频率
# 设置x轴范围
ax3.set_xlim(min_val, max_val)
# 添加标题和标签
@@ -126,7 +134,8 @@ def plot_year(data: pd.DataFrame, path: str, lat: float, g: float):
bin_width = 100
bins = np.arange(min_val, max_val + bin_width, bin_width)

sns.histplot(filtered_df["hori_wave_len"], bins=bins, kde=False, edgecolor="black", stat="percent", ax=ax4) # 加上stat='percent'表示算的是频率
sns.histplot(filtered_df["hori_wave_len"], bins=bins, kde=False,
edgecolor="black", stat="percent", ax=ax4) # 加上stat='percent'表示算的是频率
# 设置x轴范围
ax4.set_xlim(min_val, max_val)
# 添加标题和标签
@@ -143,7 +152,8 @@ def plot_year(data: pd.DataFrame, path: str, lat: float, g: float):
bin_width = 10
bins = np.arange(min_val, max_val + bin_width, bin_width)
# 创建纬向直方图
sns.histplot(filtered_df["c_x"], bins=bins, kde=False, edgecolor="black", stat="percent", ax=ax5_1)
sns.histplot(filtered_df["c_x"], bins=bins, kde=False,
edgecolor="black", stat="percent", ax=ax5_1)
ax5_1.set_xlim(min_val, max_val)
ax5_1.set_title("纬向本征相速度", fontsize=24)
ax5_1.set_xlabel("Zonal phase speed (m/s)")
@@ -156,7 +166,8 @@ def plot_year(data: pd.DataFrame, path: str, lat: float, g: float):
bin_width = 10
bins = np.arange(min_val, max_val + bin_width, bin_width)
# 创建经向直方图
sns.histplot(filtered_df["c_y"], bins=bins, kde=False, edgecolor="black", stat="percent", ax=ax5_2)
sns.histplot(filtered_df["c_y"], bins=bins, kde=False,
edgecolor="black", stat="percent", ax=ax5_2)
ax5_2.set_xlim(min_val, max_val)
ax5_2.set_title("经向本征相速度", fontsize=24)
ax5_2.set_xlabel("Meridional phase speed (m/s)")
@@ -169,7 +180,8 @@ def plot_year(data: pd.DataFrame, path: str, lat: float, g: float):
bin_width = 0.1
bins = np.arange(min_val, max_val + bin_width, bin_width)
# 创建垂直直方图
sns.histplot(filtered_df["c_z"], bins=bins, kde=False, edgecolor="black", stat="percent", ax=ax5_3)
sns.histplot(filtered_df["c_z"], bins=bins, kde=False,
edgecolor="black", stat="percent", ax=ax5_3)
ax5_3.set_xlim(min_val, max_val)
ax5_3.set_title("垂直本征相速度", fontsize=24)
ax5_3.set_xlabel("Vertical phase speed (m/s)")
@@ -184,7 +196,8 @@ def plot_year(data: pd.DataFrame, path: str, lat: float, g: float):
bin_width = 0.5
bins = np.arange(min_val, max_val + bin_width, bin_width)
# 创建纬向直方图
sns.histplot(filtered_df["u1"], bins=bins, kde=False, edgecolor="black", stat="percent", ax=ax6_1)
sns.histplot(filtered_df["u1"], bins=bins, kde=False,
edgecolor="black", stat="percent", ax=ax6_1)
ax6_1.set_xlim(min_val, max_val)
ax6_1.set_title(" ", fontsize=24)
ax6_1.set_xlabel("Zonal wind amplitude (m/s)")
@@ -197,7 +210,8 @@ def plot_year(data: pd.DataFrame, path: str, lat: float, g: float):
bin_width = 0.5
bins = np.arange(min_val, max_val + bin_width, bin_width)
# 创建经向直方图
sns.histplot(filtered_df["v1"], bins=bins, kde=False, edgecolor="black", stat="percent", ax=ax6_2)
sns.histplot(filtered_df["v1"], bins=bins, kde=False,
edgecolor="black", stat="percent", ax=ax6_2)
ax6_2.set_xlim(min_val, max_val)
ax6_2.set_title("扰动振幅统计结果", fontsize=24)
ax6_2.set_xlabel("Meridional wind amplitude (m/s)")
@@ -210,7 +224,8 @@ def plot_year(data: pd.DataFrame, path: str, lat: float, g: float):
bin_width = 0.5
bins = np.arange(min_val, max_val + bin_width, bin_width)
# 创建垂直直方图
sns.histplot(filtered_df["T1"], bins=bins, kde=False, edgecolor="black", stat="percent", ax=ax6_3)
sns.histplot(filtered_df["T1"], bins=bins, kde=False,
edgecolor="black", stat="percent", ax=ax6_3)
ax6_3.set_xlim(min_val, max_val)
ax6_3.set_title(" ", fontsize=24)
ax6_3.set_xlabel("Temperature amplitude (K)")
@@ -227,7 +242,8 @@ def plot_year(data: pd.DataFrame, path: str, lat: float, g: float):
bin_width = 0.1
bins = np.arange(min_val, max_val + bin_width, bin_width)

sns.histplot(filtered_df1["MFu"], bins=bins, kde=False, edgecolor="black", stat="percent", ax=ax7_1)
sns.histplot(filtered_df1["MFu"], bins=bins, kde=False,
edgecolor="black", stat="percent", ax=ax7_1)
ax7_1.set_xlim(min_val, max_val) # 设置x轴范围
ax7_1.set_title("纬向动量通量统计结果", fontsize=24) # 添加标题
ax7_1.set_xlabel("Zonal momentum flux(mPa)") # x轴标签
@@ -240,7 +256,8 @@ def plot_year(data: pd.DataFrame, path: str, lat: float, g: float):
bin_width = 0.1
bins = np.arange(min_val, max_val + bin_width, bin_width)

sns.histplot(filtered_df1["MFv"], bins=bins, kde=False, edgecolor="black", stat="percent", ax=ax7_2)
sns.histplot(filtered_df1["MFv"], bins=bins, kde=False,
edgecolor="black", stat="percent", ax=ax7_2)
ax7_2.set_xlim(min_val, max_val) # 设置x轴范围
ax7_2.set_title("经向动量通量统计结果", fontsize=24) # 添加标题
ax7_2.set_xlabel("Meridional momentum flux(mPa)") # x轴标签
@@ -248,8 +265,10 @@ def plot_year(data: pd.DataFrame, path: str, lat: float, g: float):

# 10、水平传播方向

filtered_df["date1"] = filtered_df["date"].str.split("T").str[0] # 再加一列,只保留日期部分
filtered_df["date1"] = pd.to_datetime(filtered_df["date1"], format="%Y%m%d") # 确保 'date1' 列是日期格式
filtered_df["date1"] = filtered_df["date"].str.split(
"T").str[0] # 再加一列,只保留日期部分
filtered_df["date1"] = pd.to_datetime(
filtered_df["date1"], format="%Y%m%d") # 确保 'date1' 列是日期格式

# 添加季节列
def get_season(date):
@@ -270,7 +289,8 @@ def plot_year(data: pd.DataFrame, path: str, lat: float, g: float):
season_data = filtered_df[filtered_df["season"] == season]
windrose = WindroseAxes.from_ax(ax)
ax.set_title(season, fontsize=18)
windrose.bar(season_data["b"], np.ones_like(season_data["b"]), normed=False) # normed=True表示占每个季节的占比
windrose.bar(season_data["b"], np.ones_like(
season_data["b"]), normed=False) # normed=True表示占每个季节的占比

# # 添加总标题
# fig.suptitle("水平传播方向在各个季节的分布变化", fontsize=16, fontweight="bold")
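
[Editor's note] The seasonal loop above relies on the windrose package. A minimal standalone sketch of the same plotting pattern (synthetic azimuths and the season label are assumptions) might be:

import numpy as np
import matplotlib.pyplot as plt
from windrose import WindroseAxes

directions = np.random.uniform(0, 360, size=200)  # propagation azimuths in degrees (synthetic)
ax = WindroseAxes.from_ax()
# one count per event; normed=True would show per-season percentages instead of raw counts
ax.bar(directions, np.ones_like(directions), normed=False)
ax.set_title("Winter")
plt.show()
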
@@ -292,8 +312,10 @@ def plot_year(data: pd.DataFrame, path: str, lat: float, g: float):
dates = monthly_avg_stats_df.index.to_numpy()

# 绘制折线图
ax8.plot(dates, monthly_avg_stats_df["Upload (%)"].to_numpy(), marker="o", label="Up (%)")
ax8.plot(dates, monthly_avg_stats_df["Downward (%)"].to_numpy(), marker="o", label="Down (%)")
ax8.plot(dates, monthly_avg_stats_df["Upload (%)"].to_numpy(
), marker="o", label="Up (%)")
ax8.plot(dates, monthly_avg_stats_df["Downward (%)"].to_numpy(
), marker="o", label="Down (%)")

# 设置月份标签
ax8.set_xticks(
@@ -313,7 +335,8 @@ def plot_year(data: pd.DataFrame, path: str, lat: float, g: float):
monthly_avg = filtered_df.groupby("year_month")[["Ek", "E_p"]].mean()

# 创建完整的月份范围(因为有的月份没数据)
full_range = pd.period_range(start=monthly_avg.index.min(), end=monthly_avg.index.max(), freq="M")
full_range = pd.period_range(
start=monthly_avg.index.min(), end=monthly_avg.index.max(), freq="M")

# 创建一个新的 DataFrame 使用完整的年份月份范围
full_monthly_avg = pd.DataFrame(index=full_range)
@@ -321,14 +344,18 @@ def plot_year(data: pd.DataFrame, path: str, lat: float, g: float):
full_monthly_avg = full_monthly_avg.join(monthly_avg)

# 确保 'Ek' 和 'E_p' 列为数值型
full_monthly_avg["Ek"] = pd.to_numeric(full_monthly_avg["Ek"], errors="coerce")
full_monthly_avg["E_p"] = pd.to_numeric(full_monthly_avg["E_p"], errors="coerce")
full_monthly_avg["Ek"] = pd.to_numeric(
full_monthly_avg["Ek"], errors="coerce")
full_monthly_avg["E_p"] = pd.to_numeric(
full_monthly_avg["E_p"], errors="coerce")

# 只显示每年6月、12月,简化图形

# 绘制 Ek、E_p
ax9.plot(full_monthly_avg.index.values.astype(str), full_monthly_avg["Ek"].values, marker="o", linestyle="-", color="r", label="动能月平均值")
ax9.plot(full_monthly_avg.index.values.astype(str), full_monthly_avg["E_p"].values, marker="o", linestyle="-", color="b", label="势能月平均值")
ax9.plot(full_monthly_avg.index.values.astype(
str), full_monthly_avg["Ek"].values, marker="o", linestyle="-", color="r", label="动能月平均值")
ax9.plot(full_monthly_avg.index.values.astype(
str), full_monthly_avg["E_p"].values, marker="o", linestyle="-", color="b", label="势能月平均值")

# 添加标题和标签
ax9.set_title("动能和势能分布情况", fontsize=24)
@@ -339,11 +366,13 @@ def plot_year(data: pd.DataFrame, path: str, lat: float, g: float):
months = full_monthly_avg.index.values.astype(str)
# 获取所有年份的6月和12月的索引
june_indices = [i for i, date in enumerate(months) if date.endswith("-06")]
december_indices = [i for i, date in enumerate(months) if date.endswith("-12")]
december_indices = [i for i, date in enumerate(
months) if date.endswith("-12")]
selected_indices = june_indices + december_indices

# 只显示选定的标签
ax9.set_xticks(ticks=selected_indices, labels=[months[i] for i in selected_indices], rotation=45)
ax9.set_xticks(ticks=selected_indices, labels=[
months[i] for i in selected_indices], rotation=45)

# 添加网格和图例
# plt.grid()

@@ -1,3 +1,5 @@
# 探空气球只有重力波

from io import BytesIO
import numpy as np
import pandas as pd
@@ -164,7 +166,7 @@ def plot_MFv(filtered_df1, ax):
ax.set_ylabel("Occurrence(%)")


def plot_horizontal_propagation(filtered_df:pd.DataFrame, season):
def plot_horizontal_propagation(filtered_df: pd.DataFrame, season):
def get_season(date):
month = date.month
if month in [12, 1, 2]:
@@ -179,26 +181,30 @@ def plot_horizontal_propagation(filtered_df:pd.DataFrame, season):
gs = gridspec.GridSpec(2, 2)
ax10 = []
for i in range(4):
ax10.append(fig.add_subplot(gs[i//2, i % 2 ], projection="windrose"))
ax10.append(fig.add_subplot(gs[i//2, i % 2], projection="windrose"))

data = filtered_df.copy()

data.loc[:, "date"] = data["file_name"].str[-28:-16]
filtered_df = data[["date"] + [col for col in data.columns if col != "file_name" and col != "date"]]
filtered_df = data[[
"date"] + [col for col in data.columns if col != "file_name" and col != "date"]]
filtered_df.reset_index(drop=True, inplace=True)

filtered_df = filtered_df.drop_duplicates(subset="date", keep="last") # 使用 drop_duplicates 函数,保留每组重复中的最后一个记录
filtered_df = filtered_df.drop_duplicates(
subset="date", keep="last") # 使用 drop_duplicates 函数,保留每组重复中的最后一个记录

filtered_df["date1"] = filtered_df["date"].str.split("T").str[0] # 再加一列,只保留日期部分
filtered_df["date1"] = pd.to_datetime(filtered_df["date1"], format="%Y%m%d") # 确保 'date1' 列是日期格式
filtered_df["date1"] = filtered_df["date"].str.split(
"T").str[0] # 再加一列,只保留日期部分
filtered_df["date1"] = pd.to_datetime(
filtered_df["date1"], format="%Y%m%d") # 确保 'date1' 列是日期格式
filtered_df["season"] = filtered_df["date1"].apply(get_season) # 添加季节列
seasons = ["Winter", "Spring", "Summer", "Fall"]
for ax, season in zip(ax10, seasons):
season_data = filtered_df[filtered_df["season"] == season]
windrose = WindroseAxes.from_ax(ax)
ax.set_title(season, fontsize=18)
windrose.bar(season_data["b"], np.ones_like(season_data["b"]), normed=False) # normed=True表示占每个季节的占比
windrose.bar(season_data["b"], np.ones_like(
season_data["b"]), normed=False) # normed=True表示占每个季节的占比
# season_data = filtered_df[filtered_df["season"] == season]
# windrose = WindroseAxes.from_ax(ax)
# ax.set_title(season, fontsize=18)
@@ -208,17 +214,20 @@ def plot_horizontal_propagation(filtered_df: pd.DataFrame, season):

def plot_vertical_propagation(filtered_df, ax):
data = filtered_df.copy()

data.loc[:, "date"] = data["file_name"].str[-28:-16]
filtered_df = data[["date"] + [col for col in data.columns if col != "file_name" and col != "date"]]
filtered_df = data[[
"date"] + [col for col in data.columns if col != "file_name" and col != "date"]]
filtered_df.reset_index(drop=True, inplace=True)

filtered_df = filtered_df.drop_duplicates(subset="date", keep="last") # 使用 drop_duplicates 函数,保留每组重复中的最后一个记录
filtered_df = filtered_df.drop_duplicates(
subset="date", keep="last") # 使用 drop_duplicates 函数,保留每组重复中的最后一个记录

filtered_df["date1"] = filtered_df["date"].str.split(
"T").str[0] # 再加一列,只保留日期部分
filtered_df["date1"] = pd.to_datetime(
filtered_df["date1"], format="%Y%m%d") # 确保 'date1' 列是日期格式

filtered_df["date1"] = filtered_df["date"].str.split("T").str[0] # 再加一列,只保留日期部分
filtered_df["date1"] = pd.to_datetime(filtered_df["date1"], format="%Y%m%d") # 确保 'date1' 列是日期格式

filtered_df.set_index("date1", inplace=True)
monthly_stats_df = (
filtered_df.groupby([filtered_df.index.month, filtered_df.index.year])
@@ -242,18 +251,20 @@ def plot_vertical_propagation(filtered_df, ax):

def plot_energy_distribution(filtered_df, ax):
data = filtered_df.copy()

data.loc[:, "date"] = data["file_name"].str[-28:-16]
filtered_df = data[["date"] + [col for col in data.columns if col != "file_name" and col != "date"]]
filtered_df = data[[
"date"] + [col for col in data.columns if col != "file_name" and col != "date"]]
filtered_df.reset_index(drop=True, inplace=True)

filtered_df = filtered_df.drop_duplicates(subset="date", keep="last") # 使用 drop_duplicates 函数,保留每组重复中的最后一个记录
filtered_df = filtered_df.drop_duplicates(
subset="date", keep="last") # 使用 drop_duplicates 函数,保留每组重复中的最后一个记录

filtered_df["date1"] = filtered_df["date"].str.split(
"T").str[0] # 再加一列,只保留日期部分
filtered_df["date1"] = pd.to_datetime(
filtered_df["date1"], format="%Y%m%d") # 确保 'date1' 列是日期格式

filtered_df["date1"] = filtered_df["date"].str.split("T").str[0] # 再加一列,只保留日期部分
filtered_df["date1"] = pd.to_datetime(filtered_df["date1"], format="%Y%m%d") # 确保 'date1' 列是日期格式

filtered_df.reset_index(inplace=True)
filtered_df["year_month"] = filtered_df["date1"].dt.to_period("M")
monthly_avg = filtered_df.groupby("year_month")[["Ek", "E_p"]].mean()

@@ -2,8 +2,8 @@ from io import BytesIO
from quart import Blueprint, request, send_file
from matplotlib import pyplot as plt

from modules.cosmic.single import SingleCosmicWavePlot
from modules.cosmic.temp_render import temp_render
from modules.cosmic.gravityw_perday import CosmicGravitywPlot
from modules.cosmic.planetw_daily import cosmic_planetw_daily_plot
from quart.utils import run_sync

@@ -18,7 +18,7 @@ def get_meta():
@cosmic_module.route('/temp_render')
async def render():
T = request.args.get("T", 16)
await run_sync(temp_render)(T=int(T))
await run_sync(cosmic_planetw_daily_plot)(T=int(T))
buf = BytesIO()

plt.savefig(buf, format="png")
@@ -32,7 +32,7 @@ async def single_render():
day = request.args.get("day", 1)
mode = request.args.get("mode", "mean_ktemp_Nz")

p: SingleCosmicWavePlot = await run_sync(SingleCosmicWavePlot)(year=int(year), day=int(day))
p: CosmicGravitywPlot = await run_sync(CosmicGravitywPlot)(year=int(year), day=int(day))
if mode == "mean_ktemp_Nz":
await run_sync(p.plot_results_mean_ktemp_Nz)()
elif mode == "mean_ktemp_Ptz":

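
[Editor's note] The route changes above all follow one pattern: a blocking matplotlib renderer is pushed off the event loop with quart.utils.run_sync and the resulting figure is streamed back as a PNG. A minimal sketch of that pattern (the blueprint name and renderer function here are placeholders, not the project's real ones) could look like:

from io import BytesIO
from quart import Blueprint, request, send_file
from quart.utils import run_sync
from matplotlib import pyplot as plt

demo_module = Blueprint("Demo", __name__)  # hypothetical blueprint

def blocking_plot(T: int) -> None:
    # stand-in for cosmic_planetw_daily_plot / temp_render: draws onto the current pyplot figure
    plt.figure()
    plt.plot(range(T))

@demo_module.route("/render")
async def render():
    T = request.args.get("T", 16)
    await run_sync(blocking_plot)(int(T))  # run the blocking renderer in a worker thread
    buf = BytesIO()
    plt.savefig(buf, format="png")
    buf.seek(0)
    return await send_file(buf, mimetype="image/png")
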
@@ -10,7 +10,7 @@ import netCDF4 as nc
import matplotlib.pyplot as plt
import seaborn as sns
# 设置支持中文的字体
plt.rcParams['font.sans-serif'] = ['Microsoft YaHei'] # 设置字体为微软雅黑
plt.rcParams['font.sans-serif'] = ['SimHei'] # 设置字体为微软雅黑
plt.rcParams['axes.unicode_minus'] = False # 正常显示负号

# 定义处理单个文件的函数
@@ -379,7 +379,7 @@ def plot_results_mean_ktemp_Ptz(mean_ktemp_Ptz, heights):
# plt.show()


class SingleCosmicWavePlot:
class CosmicGravitywPlot:

def __init__(self, year, day):
BASE_PATH_COSMIC = DATA_BASEPATH.cosmic

@@ -17,7 +17,7 @@ matplotlib.rcParams['axes.unicode_minus'] = False # 正常显示负号
# 读取一年的数据文件


def temp_render(path: str = f"{DATA_BASEPATH.cosmic}/cosmic.txt", T=16):
def cosmic_planetw_daily_plot(path: str = f"{DATA_BASEPATH.cosmic}/cosmic.txt", T=16):
def u_func(x, *params):
a1, b1, a2, b2, a3, b3, a4, b4, a5, b5, a6, b6, a7, b7, a8, b8, a9, b9 = params
return (

@@ -4,8 +4,8 @@ from io import BytesIO
from quart import Blueprint, request, send_file
from matplotlib import pyplot as plt
from CONSTANT import DATA_BASEPATH
from modules.radar.plot_original import final_render_v2
from modules.radar.plot_prod import final_plot_v2
from modules.radar.allw_amplitude import radar_allw_amplitude
from modules.radar.allw_plot_heatmap import radar_all_heatmap_plot
from quart.utils import run_sync

BASE_PATH_RADAR = DATA_BASEPATH.radar
@@ -17,12 +17,12 @@ radar_module = Blueprint("Radar", __name__)

@radar_module.route("/metadata")
def get_all_files():
return final_render_v2.get_all_pathes()
return radar_allw_amplitude.get_all_pathes()


@radar_module.route("/metadata/models")
def get_all_models():
return final_render_v2.get_all_models()
return radar_allw_amplitude.get_all_models()


@radar_module.route('/render/heatmap')
@@ -41,7 +41,7 @@ async def render_v1():
model_name = request.args.get('model_name')
mode = request.args.get('mode')

renderer = await run_sync(final_render_v2)(int(H), int(year), station, wind_type)
renderer = await run_sync(radar_allw_amplitude)(int(H), int(year), station, wind_type)
if mode == "day":
day = request.args.get('day')
renderer.render_day(day, model_name)
@@ -79,7 +79,7 @@ async def render_v2():
else:
month_range = (1, 12)

buffer = await run_sync(final_plot_v2)(int(year), station, model_name, month_range)
buffer = await run_sync(radar_all_heatmap_plot)(int(year), station, model_name, month_range)
buffer.seek(0)

return await send_file(buffer, mimetype='image/png')

@ -24,6 +24,8 @@ matplotlib.rcParams['font.sans-serif'] = ['SimHei'] # 显示中文
|
||||
matplotlib.rcParams['axes.unicode_minus'] = False # 正常显示负号
|
||||
|
||||
# 定义所有模型
|
||||
|
||||
|
||||
def tidal_model(t, v0, a1, b1, a2, b2, a3, b3, a4, b4):
|
||||
"""潮汐波模型"""
|
||||
T1, T2, T3, T4 = 6, 8, 12, 24 # 周期以小时为单位
|
||||
@ -32,26 +34,31 @@ def tidal_model(t, v0, a1, b1, a2, b2, a3, b3, a4, b4):
|
||||
+ a3 * np.sin((2 * np.pi / T3) * t + b3)
|
||||
+ a4 * np.sin((2 * np.pi / T4) * t + b4))
|
||||
|
||||
|
||||
def planetary_model_2day(t, v0, a, b):
|
||||
"""2日行星波模型"""
|
||||
T = 48 # 周期为2天(48小时)
|
||||
return v0 + a * np.sin((2 * np.pi / T) * t + b)
|
||||
|
||||
|
||||
def planetary_model_5day(t, v0, a, b):
|
||||
"""5日行星波模型"""
|
||||
T = 120 # 周期为5天(120小时)
|
||||
return v0 + a * np.sin((2 * np.pi / T) * t + b)
|
||||
|
||||
|
||||
def planetary_model_10day(t, v0, a, b):
|
||||
"""10日行星波模型"""
|
||||
T = 240 # 周期为10天(240小时)
|
||||
return v0 + a * np.sin((2 * np.pi / T) * t + b)
|
||||
|
||||
|
||||
def planetary_model_16day(t, v0, a, b):
|
||||
"""16日行星波模型"""
|
||||
T = 384 # 周期为16天(384小时)
|
||||
return v0 + a * np.sin((2 * np.pi / T) * t + b)
|
||||
|
||||
|
||||
# 统一管理模型及其参数
|
||||
MODELS = {
|
||||
'潮汐波': {
|
||||
@ -92,7 +99,9 @@ MODELS = {
|
||||
},
|
||||
}
|
||||
|
||||
################################ 选定风的类型,得到整年的每一天拟合参数
|
||||
# 选定风的类型,得到整年的每一天拟合参数
|
||||
|
||||
|
||||
def process_wind_data_with_models(final_df, wind_type, year, H, selected_models=None):
|
||||
"""
|
||||
处理风速数据并针对多种模型进行拟合和绘图。
|
||||
@ -136,8 +145,10 @@ def process_wind_data_with_models(final_df, wind_type, year, H, selected_models=
|
||||
|
||||
for date in unique_dates:
|
||||
# 根据模型窗口调整时间范围
|
||||
start_date = pd.to_datetime(date) - pd.Timedelta(days=(window_days - 1) / 2)
|
||||
end_date = pd.to_datetime(date) + pd.Timedelta(days=(window_days - 1) / 2)
|
||||
start_date = pd.to_datetime(
|
||||
date) - pd.Timedelta(days=(window_days - 1) / 2)
|
||||
end_date = pd.to_datetime(
|
||||
date) + pd.Timedelta(days=(window_days - 1) / 2)
|
||||
window_data = data[start_date:end_date]
|
||||
t = np.arange(len(window_data)) + 1
|
||||
y = pd.to_numeric(window_data['wind_speed'], errors='coerce')
|
||||
@ -145,20 +156,23 @@ def process_wind_data_with_models(final_df, wind_type, year, H, selected_models=
|
||||
t = t[valid_mask]
|
||||
y = y[valid_mask]
|
||||
|
||||
if len(y) < len(initial_guess) * 2: # todo:认为如果数据点个数少于参数的2倍,则认为数据不够拟合,之前是6倍,在这里放宽了
|
||||
print(f"Not enough valid data after filtering for date: {date}")
|
||||
if len(y) < len(initial_guess) * 2: # todo:认为如果数据点个数少于参数的2倍,则认为数据不够拟合,之前是6倍,在这里放宽了
|
||||
print(
|
||||
f"Not enough valid data after filtering for date: {date}")
|
||||
params.append([None] * len(param_names))
|
||||
continue
|
||||
|
||||
try:
|
||||
popt, _ = curve_fit(model_func, t, y, p0=initial_guess, bounds=bounds)
|
||||
popt, _ = curve_fit(
|
||||
model_func, t, y, p0=initial_guess, bounds=bounds)
|
||||
params.append(popt)
|
||||
except RuntimeError:
|
||||
print(f"Fitting failed for date: {date}")
|
||||
params.append([None] * len(param_names))
|
||||
|
||||
# 保存整年参数
|
||||
params_df = pd.DataFrame(params, columns=param_names, index=unique_dates)
|
||||
params_df = pd.DataFrame(
|
||||
params, columns=param_names, index=unique_dates)
|
||||
# file_name = f'{year}年_{wind_type}_{H // 1000}km_{model_name}_强度.txt'
|
||||
# params_df.to_csv(os.path.join(output_dir, file_name), sep='\t', index=True, header=True)
|
||||
|
||||
@ -167,7 +181,9 @@ def process_wind_data_with_models(final_df, wind_type, year, H, selected_models=
|
||||
return all_params # 返回包含所有模型参数的数据字典
|
||||
|
||||
# 外部绘制全年图形
|
||||
def plot_yearly_params(params_dict, model_name,year, wind_type, H):
|
||||
|
||||
|
||||
def plot_yearly_params(params_dict, model_name, year, wind_type, H):
|
||||
"""
|
||||
绘制全年的参数图形。
|
||||
|
||||
@ -183,7 +199,8 @@ def plot_yearly_params(params_dict, model_name,year, wind_type, H):
|
||||
plt.plot(params_df[f'a{i}'], label=f'{T}h', linewidth=2)
|
||||
else: # 处理行星波单分量
|
||||
plt.figure(figsize=(12, 6))
|
||||
plt.plot(params_df['a'], label=f'{model_name}', color='orange', linewidth=2)
|
||||
plt.plot(params_df['a'],
|
||||
label=f'{model_name}', color='orange', linewidth=2)
|
||||
|
||||
plt.title(f'{year}年{wind_type}_{H // 1000}km_{model_name}_强度', fontsize=16)
|
||||
plt.xlabel('日期', fontsize=14)
|
||||
@ -194,6 +211,8 @@ def plot_yearly_params(params_dict, model_name,year, wind_type, H):
|
||||
plt.tight_layout()
|
||||
|
||||
# 绘制月振幅图
|
||||
|
||||
|
||||
def plot_month_params(params_dict, model_name, wind_type, year, month, H):
|
||||
"""
|
||||
绘制指定月份的参数图形。
|
||||
@ -214,7 +233,8 @@ def plot_month_params(params_dict, model_name, wind_type, year, month, H):
|
||||
params_df.index = pd.to_datetime(params_df.index)
|
||||
|
||||
# 筛选出指定年份和月份的数据
|
||||
params_df_month = params_df[(params_df.index.year == year) & (params_df.index.month == month)]
|
||||
params_df_month = params_df[(params_df.index.year == year) & (
|
||||
params_df.index.month == month)]
|
||||
|
||||
# 检查筛选后的数据是否为空
|
||||
if params_df_month.empty:
|
||||
@ -231,15 +251,18 @@ def plot_month_params(params_dict, model_name, wind_type, year, month, H):
|
||||
if col_name in params_df_month.columns:
|
||||
plt.plot(params_df_month[col_name], label=f'{T}h', linewidth=2)
|
||||
else:
|
||||
print(f"Warning: Column '{col_name}' not found in the dataframe.")
|
||||
print(
|
||||
f"Warning: Column '{col_name}' not found in the dataframe.")
|
||||
else: # 处理行星波单分量
|
||||
if 'a' in params_df_month.columns:
|
||||
plt.plot(params_df_month['a'], label=f'{model_name}', color='orange', linewidth=2)
|
||||
plt.plot(
|
||||
params_df_month['a'], label=f'{model_name}', color='orange', linewidth=2)
|
||||
else:
|
||||
print(f"Warning: Column 'a' not found in the dataframe.")
|
||||
|
||||
# 设置标题和标签
|
||||
plt.title(f'{year}年{month}月{wind_type}_{H // 1000}km_{model_name}_强度', fontsize=16)
|
||||
plt.title(
|
||||
f'{year}年{month}月{wind_type}_{H // 1000}km_{model_name}_强度', fontsize=16)
|
||||
plt.xlabel('日期', fontsize=14)
|
||||
plt.ylabel('幅度 (m/s)', fontsize=14)
|
||||
plt.xticks(rotation=45)
|
||||
@ -248,7 +271,6 @@ def plot_month_params(params_dict, model_name, wind_type, year, month, H):
|
||||
plt.tight_layout()
|
||||
|
||||
|
||||
|
||||
# 绘制日拟合正弦波图
|
||||
def plot_sine_wave_for_day(params_df, date, wind_type, H, model_name):
|
||||
"""
|
||||
@ -273,14 +295,16 @@ def plot_sine_wave_for_day(params_df, date, wind_type, H, model_name):
|
||||
|
||||
# 获取指定日期的参数
|
||||
if date not in params_df.index:
|
||||
print(f"Warning: {date.strftime('%Y-%m-%d')} not found in the parameter dataframe.")
|
||||
print(
|
||||
f"Warning: {date.strftime('%Y-%m-%d')} not found in the parameter dataframe.")
|
||||
return
|
||||
|
||||
params = params_df.loc[date]
|
||||
|
||||
# 检查参数是否为 NaN 或 Inf
|
||||
if params.isnull().any() or np.isinf(params).any():
|
||||
print(f"Warning: Parameters for {date.strftime('%Y-%m-%d')} contain NaN or Inf values. Skipping the plot.")
|
||||
print(
|
||||
f"Warning: Parameters for {date.strftime('%Y-%m-%d')} contain NaN or Inf values. Skipping the plot.")
|
||||
return
|
||||
|
||||
# 判断是否是潮汐波模型
|
||||
@ -317,7 +341,8 @@ def plot_sine_wave_for_day(params_df, date, wind_type, H, model_name):
|
||||
plt.plot(t, y5, label='24h', color='red')
|
||||
|
||||
# 添加图例和标签
|
||||
plt.title(f'{date.strftime("%Y-%m-%d")} {wind_type}风速拟合潮汐波', fontsize=16)
|
||||
plt.title(
|
||||
f'{date.strftime("%Y-%m-%d")} {wind_type}风速拟合潮汐波', fontsize=16)
|
||||
plt.xlabel('时间 (小时)', fontsize=14)
|
||||
plt.ylabel('幅度 (m/s)', fontsize=14)
|
||||
plt.legend()
|
||||
@ -360,7 +385,8 @@ def plot_sine_wave_for_day(params_df, date, wind_type, H, model_name):
|
||||
plt.plot(t, y2, label=f'{model_name} ({T}小时)', color='orange')
|
||||
|
||||
# 添加图例和标签
|
||||
plt.title(f'{date.strftime("%Y-%m-%d")} {wind_type}风速拟合 {model_name}', fontsize=16)
|
||||
plt.title(
|
||||
f'{date.strftime("%Y-%m-%d")} {wind_type}风速拟合 {model_name}', fontsize=16)
|
||||
plt.xlabel('时间 (小时)', fontsize=14)
|
||||
plt.ylabel('幅度 (m/s)', fontsize=14)
|
||||
plt.legend()
|
||||
@ -372,14 +398,14 @@ def plot_sine_wave_for_day(params_df, date, wind_type, H, model_name):
|
||||
|
||||
|
||||
def get_final_df(H, year, station):
|
||||
|
||||
# 下边处理,绘图
|
||||
# 参数设置,选择高度、时间、站点
|
||||
|
||||
# # station = '黑龙江漠河站'
|
||||
# 下边处理,绘图
|
||||
# 参数设置,选择高度、时间、站点
|
||||
|
||||
# # station = '黑龙江漠河站'
|
||||
|
||||
# 文件路径设置
|
||||
|
||||
|
||||
file_dir = rf'{DATA_BASEPATH.radar}/{station}/{year}' # 数据文件路径
|
||||
# output_dir = rf'./out\{station}\{year}' # 参数txt、图片输出路径
|
||||
# os.makedirs(output_dir, exist_ok=True) # 确保输出路径存在
|
||||
@ -411,18 +437,22 @@ def get_final_df(H, year, station):
|
||||
})
|
||||
|
||||
# 只保留需要的列
|
||||
filtered_df = filtered_df[['height', 'time', f'{date_part}_uwind', f'{date_part}_vwind']]
|
||||
filtered_df = filtered_df[['height', 'time',
|
||||
f'{date_part}_uwind', f'{date_part}_vwind']]
|
||||
|
||||
# 如果 final_df 为空,则直接赋值
|
||||
if final_df.empty:
|
||||
final_df = filtered_df
|
||||
else:
|
||||
# 按 'height' 和 'time' 列对齐合并
|
||||
final_df = pd.merge(final_df, filtered_df, on=['height', 'time'], how='outer')
|
||||
final_df = pd.merge(final_df, filtered_df, on=[
|
||||
'height', 'time'], how='outer')
|
||||
|
||||
# 生成完整日期范围的列名,补全缺失的天,使得平年365,闰年366
|
||||
all_dates = pd.date_range(f'{year}-01-01', f'{year}-12-31').strftime('%Y%m%d')
|
||||
expected_columns = [f'{date}_uwind' for date in all_dates] + [f'{date}_vwind' for date in all_dates]
|
||||
all_dates = pd.date_range(
|
||||
f'{year}-01-01', f'{year}-12-31').strftime('%Y%m%d')
|
||||
expected_columns = [f'{date}_uwind' for date in all_dates] + \
|
||||
[f'{date}_vwind' for date in all_dates]
|
||||
|
||||
# 确保 final_df 包含所有日期的列,缺失列补为 NaN
|
||||
for col in expected_columns:
|
||||
@ -443,34 +473,38 @@ def get_final_df(H, year, station):
|
||||
# selected_models=None)
|
||||
|
||||
|
||||
class final_render_v2():
|
||||
class radar_allw_amplitude():
|
||||
def __init__(self, H, year, station, wind_type):
|
||||
self.H , self.year, self.station, self.wind_type = H, year, station, wind_type
|
||||
self.H, self.year, self.station, self.wind_type = H, year, station, wind_type
|
||||
final_df = get_final_df(H, year, station)
|
||||
self.params_dict = process_wind_data_with_models(final_df, wind_type, year, H, selected_models=None)
|
||||
|
||||
self.params_dict = process_wind_data_with_models(
|
||||
final_df, wind_type, year, H, selected_models=None)
|
||||
|
||||
def render_day(self, date, model_name):
|
||||
plot_sine_wave_for_day(self.params_dict[model_name], date, self.wind_type, self.H, model_name)
|
||||
|
||||
plot_sine_wave_for_day(
|
||||
self.params_dict[model_name], date, self.wind_type, self.H, model_name)
|
||||
|
||||
def render_month(self, month, model_name):
|
||||
plot_month_params(self.params_dict, model_name, self.wind_type, self.year, month, self.H)
|
||||
|
||||
plot_month_params(self.params_dict, model_name,
|
||||
self.wind_type, self.year, month, self.H)
|
||||
|
||||
def render_year(self, model_name):
|
||||
plot_yearly_params(self.params_dict, model_name, self.year, self.wind_type, self.H)
|
||||
|
||||
plot_yearly_params(self.params_dict, model_name,
|
||||
self.year, self.wind_type, self.H)
|
||||
|
||||
@staticmethod
|
||||
def get_all_models():
|
||||
return list(MODELS.keys())
|
||||
|
||||
|
||||
@staticmethod
|
||||
def get_all_pathes():
|
||||
#
|
||||
result = glob.glob(f"{DATA_BASEPATH.radar}/**/*.txt", recursive=True)
|
||||
#
|
||||
result = glob.glob(f"{DATA_BASEPATH.radar}/**/*.txt", recursive=True)
|
||||
# normalize path
|
||||
result = [os.path.normpath(path).replace("\\", "/") for path in result]
|
||||
return result
|
||||
|
||||
|
||||
|
||||
|
||||
# todo:此时得到的参数,年份、高度、风的类型已经固定
|
||||
|
||||
# 3---------------------------绘图
|
||||
@ -482,10 +516,10 @@ if __name__ == '__main__':
|
||||
fm.fontManager.addfont("./SimHei.ttf")
|
||||
|
||||
# station = '武汉左岭镇站'
|
||||
renderer = final_render_v2(94000, 2022, '武汉左岭镇站', 'uwind')
|
||||
renderer = radar_allw_amplitude(94000, 2022, '武汉左岭镇站', 'uwind')
|
||||
renderer.render_year('潮汐波')
|
||||
plt.show()
|
||||
renderer.render_month(3, '潮汐波')
|
||||
plt.show()
|
||||
renderer.render_day('2022-03-17', '潮汐波')
|
||||
plt.show()
|
||||
plt.show()
|
||||
@ -1,3 +1,6 @@
|
||||
# 这东西是花热力图的
|
||||
# 也是两种波都能画
|
||||
|
||||
import glob
|
||||
from io import BytesIO
|
||||
import os
|
||||
@ -34,7 +37,7 @@ def planetary_model(t, v0, a, b, period):
|
||||
# ------------------------------ 数据处理工具函数 ------------------------------
|
||||
|
||||
|
||||
def preprocess_data(file_list, year, month_range=(1,12)):
|
||||
def preprocess_data(file_list, year, month_range=(1, 12)):
|
||||
def filter_given_month(month_range, file_list):
|
||||
begin_month, end_month = month_range
|
||||
get_date_pattern = r'_(\d{8})\.txt'
|
||||
@ -47,7 +50,7 @@ def preprocess_data(file_list, year, month_range=(1,12)):
|
||||
return filtered_file_list
|
||||
if file_list == []:
|
||||
raise ValueError("No data files found.")
|
||||
|
||||
|
||||
file_list = filter_given_month(month_range, file_list)
|
||||
"""读取数据文件并合并成完整的 DataFrame"""
|
||||
# file_list = [f for f in os.listdir(file_dir) if f.endswith('.txt')]
|
||||
@ -162,7 +165,7 @@ def plot_heatmaps(result_dict, param, title, vmax):
|
||||
|
||||
|
||||
# ------------------------------ 主逻辑 ------------------------------
|
||||
def final_plot_v2(year, station, model_name, month_range=(1,12)):
|
||||
def radar_all_heatmap_plot(year, station, model_name, month_range=(1, 12)):
|
||||
"""
|
||||
主逻辑函数,用于处理数据并绘制选定模型的热力图。
|
||||
|
||||
@ -3,10 +3,10 @@ from io import BytesIO
|
||||
from quart import Blueprint, request, send_file
|
||||
from matplotlib import pyplot as plt
|
||||
from CONSTANT import DATA_BASEPATH
|
||||
from modules.saber.archive.gravity_plot import do_gravity_plot
|
||||
from modules.saber.archive.gravity_process import process_gravity_data
|
||||
from modules.saber.process import DataProcessor
|
||||
from modules.saber.render import Renderer
|
||||
from modules.saber.planetw_plot import saber_planetw_plot
|
||||
from modules.saber.planetw_process import saber_planetw_process
|
||||
from modules.saber.gravityw_process import SaberGravitywProcessor
|
||||
from modules.saber.gravityw_render import SaberGravitywRenderer
|
||||
from modules.saber.utils import *
|
||||
from quart.utils import run_sync
|
||||
|
||||
@ -19,7 +19,7 @@ saber_module = Blueprint('saber', __name__)
|
||||
|
||||
# processor = DataProcessor(
|
||||
# lat_range=lat_range, alt_range=alt_range, lambda_range=lambda_range, lvboin=lvboin)
|
||||
renderer = Renderer()
|
||||
renderer = SaberGravitywRenderer()
|
||||
|
||||
|
||||
def get_processer():
|
||||
@ -40,7 +40,7 @@ def get_processer():
|
||||
if lvboin is None:
|
||||
lvboin = True
|
||||
|
||||
_p = DataProcessor(
|
||||
_p = SaberGravitywProcessor(
|
||||
lat_range=lat_range, alt_range=alt_range, lambda_range=lambda_range, lvboin=lvboin)
|
||||
return _p
|
||||
|
||||
@ -142,6 +142,6 @@ async def do_gravity_year():
|
||||
T = int(T)
|
||||
if T not in [5, 10, 16]:
|
||||
raise ValueError("T must be in [5, 10, 16]")
|
||||
data = await run_sync(process_gravity_data)(year)
|
||||
await run_sync(do_gravity_plot)(data, T)
|
||||
data = await run_sync(saber_planetw_process)(year)
|
||||
await run_sync(saber_planetw_plot)(data, T)
|
||||
return await extract_payload()
|
||||
|
||||
@ -1,3 +1,5 @@
|
||||
# 推断出这是重力波
|
||||
|
||||
import numpy as np
|
||||
import matplotlib.pyplot as plt
|
||||
import matplotlib.dates as mdates
|
||||
|
||||
@ -138,7 +138,7 @@ class YearlyData:
|
||||
return cls(date_times=data["date_time_list"], wave_data=wave_data)
|
||||
|
||||
|
||||
class DataProcessor:
|
||||
class SaberGravitywProcessor:
|
||||
def __init__(self, lat_range: Tuple[float, float],
|
||||
alt_range: Tuple[float, float],
|
||||
lambda_range: Tuple[float, float],
|
||||
@ -234,7 +234,7 @@ if __name__ == "__main__":
|
||||
lamda_low = 2
|
||||
lamda_high = 15
|
||||
lvboin = True
|
||||
processor = DataProcessor(
|
||||
processor = SaberGravitywProcessor(
|
||||
lat_range=(latitude_min, latitude_max),
|
||||
alt_range=(altitude_min, altitude_max),
|
||||
lambda_range=(lamda_low, lamda_high),
|
||||
@ -6,7 +6,7 @@ from matplotlib.figure import Figure
|
||||
from matplotlib.axes import Axes
|
||||
import matplotlib.dates as mdates
|
||||
|
||||
from modules.saber.process import DataProcessor, WaveData, YearlyData
|
||||
from modules.saber.gravityw_process import SaberGravitywProcessor, WaveData, YearlyData
|
||||
|
||||
|
||||
@dataclass
|
||||
@ -21,7 +21,7 @@ class PlotConfig:
|
||||
tick_fontsize: int = 8
|
||||
|
||||
|
||||
class Renderer:
|
||||
class SaberGravitywRenderer:
|
||||
def __init__(self, config: Optional[PlotConfig] = None):
|
||||
self.config = config or PlotConfig()
|
||||
plt.rcParams['font.family'] = 'SimHei' # 设置为黑体(需要你的环境中有该字体)
|
||||
@ -27,7 +27,7 @@ bounds = (
|
||||
np.inf, np.inf, np.inf]) # 上界
|
||||
|
||||
|
||||
def do_gravity_plot(df: pd.DataFrame, T=16):
|
||||
def saber_planetw_plot(df: pd.DataFrame, T=16):
|
||||
# 定义拟合函数
|
||||
def u_func(x, *params):
|
||||
a1, b1, a2, b2, a3, b3, a4, b4, a5, b5, a6, b6, a7, b7, a8, b8, a9, b9 = params
|
||||
@ -20,7 +20,7 @@ months = [
|
||||
]
|
||||
|
||||
|
||||
def process_gravity_data(year):
|
||||
def saber_planetw_process(year):
|
||||
# 定义文件路径模板和年份
|
||||
|
||||
base_path = DATA_BASEPATH.saber + \
|
||||
@ -1,3 +1,5 @@
|
||||
# 给重力波用的
|
||||
|
||||
from dataclasses import dataclass
|
||||
from os import path
|
||||
import numpy as np
|
||||
|
||||
@ -5,8 +5,8 @@ from quart.utils import run_sync
|
||||
from matplotlib import pyplot as plt
|
||||
|
||||
from CONSTANT import DATA_BASEPATH
|
||||
from modules.tidi.plot import TidiPlotv2
|
||||
from modules.tidi.staged.plot import tidi_render
|
||||
from modules.tidi.gravity_wave_year import TidiPlotGWYear
|
||||
from modules.tidi.planet_wave_daily import TidiPlotPWDaily
|
||||
|
||||
|
||||
tidi_module = Blueprint("Tidi", __name__)
|
||||
@ -37,7 +37,7 @@ async def render_wave():
|
||||
height = float(height)
|
||||
lat_range = tuple(map(int, lat_range.split('~')))
|
||||
|
||||
await run_sync(tidi_render)(mode, year, k, height, lat_range, T)
|
||||
await run_sync(TidiPlotPWDaily)(mode, year, k, height, lat_range, T)
|
||||
buffer = BytesIO()
|
||||
plt.savefig(buffer, format="png")
|
||||
buffer.seek(0)
|
||||
@ -50,7 +50,7 @@ async def render_stats_v1():
|
||||
year = request.args.get('year')
|
||||
year = int(year)
|
||||
|
||||
plotter = await run_sync(TidiPlotv2)(year)
|
||||
plotter = await run_sync(TidiPlotGWYear)(year)
|
||||
await run_sync(plotter.plot_v1)()
|
||||
buffer = BytesIO()
|
||||
plt.savefig(buffer, format="png")
|
||||
@ -63,7 +63,7 @@ async def render_stats_v2():
|
||||
year = request.args.get('year')
|
||||
year = int(year)
|
||||
|
||||
plotter = await run_sync(TidiPlotv2)(year)
|
||||
plotter = await run_sync(TidiPlotGWYear)(year)
|
||||
await run_sync(plotter.plot_month)()
|
||||
buffer = BytesIO()
|
||||
plt.savefig(buffer, format="png")
|
||||
|
||||
@ -759,7 +759,7 @@ def day_to_month(day):
|
||||
return f'{["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"][i]}'
|
||||
|
||||
|
||||
class TidiPlotv2:
|
||||
class TidiPlotGWYear:
|
||||
def __init__(self, year):
|
||||
self.year = year
|
||||
cache_path = f"{DATA_BASEPATH.tidi}/cache"
|
||||
@ -13,7 +13,7 @@ import matplotlib.pyplot as plt
|
||||
import matplotlib
|
||||
|
||||
from CONSTANT import DATA_BASEPATH
|
||||
from modules.tidi.staged.process import tidi_process_idk
|
||||
from modules.tidi.tidi_pw_process import extract_tidi_pw_data
|
||||
# 设置中文显示和负号正常显示
|
||||
matplotlib.rcParams['font.sans-serif'] = ['SimHei'] # 显示中文
|
||||
matplotlib.rcParams['axes.unicode_minus'] = False # 正常显示负号
|
||||
@ -33,7 +33,7 @@ bounds = (
|
||||
|
||||
# 定义拟合函数
|
||||
|
||||
def tidi_render(
|
||||
def TidiPlotPWDaily(
|
||||
mode,
|
||||
year,
|
||||
k,
|
||||
@ -55,7 +55,7 @@ def tidi_render(
|
||||
if os.path.exists(data_cache_path):
|
||||
df = pd.read_parquet(data_cache_path)
|
||||
else:
|
||||
df = tidi_process_idk(
|
||||
df = extract_tidi_pw_data(
|
||||
year, input_height, lat_range
|
||||
)[mode]
|
||||
df.to_parquet(data_cache_path)
|
||||
@ -146,8 +146,8 @@ def tidi_render(
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
tidi_render('V_Zonal', 2015, 1)
|
||||
tidi_render('V_Meridional', 2015, 1)
|
||||
TidiPlotPWDaily('V_Zonal', 2015, 1)
|
||||
TidiPlotPWDaily('V_Meridional', 2015, 1)
|
||||
|
||||
# # 用于存储拟合参数结果的列表
|
||||
# all_fit_results = []
|
||||
@ -1,878 +0,0 @@
|
||||
import pandas as pd
|
||||
import numpy as np
|
||||
from scipy.io import loadmat
|
||||
from scipy.optimize import curve_fit
|
||||
import matplotlib.pyplot as plt
|
||||
import seaborn as sns
|
||||
# ---------------------------------------------------------------------------------------
|
||||
# -----vzonal----------------------------------------------------------------------------
|
||||
|
||||
|
||||
def process_vzonal_day(day, year, ):
|
||||
try:
|
||||
# 读取数据
|
||||
height_data = loadmat(rf"./tidi/data\{year}\{day:03d}_Height.mat")
|
||||
lat_data = loadmat(rf"./tidi/data\{year}\{day:03d}_Lat.mat")
|
||||
lon_data = loadmat(rf"./tidi/data\{year}\{day:03d}_Lon.mat")
|
||||
vmeridional_data = loadmat(
|
||||
rf"./tidi/data\{year}\{day:03d}_VMerdional.mat")
|
||||
vzonal_data = loadmat(rf"./tidi/data\{year}\{day:03d}_Vzonal.mat")
|
||||
|
||||
# 将数据转换为DataFrame
|
||||
height_df = pd.DataFrame(height_data['Height'])
|
||||
lat_df = pd.DataFrame(lat_data['Lat'])
|
||||
lon_df = pd.DataFrame(lon_data['Lon'])
|
||||
vmeridional_df = pd.DataFrame(vmeridional_data['VMerdional'])
|
||||
vzonal_df = pd.DataFrame(vzonal_data['Vzonal'])
|
||||
# 将经纬度拼接为两列并添加到对应的DataFrame中
|
||||
lon_lat_df = pd.concat([lon_df, lat_df], axis=1)
|
||||
lon_lat_df.columns = ['Longitude', 'Latitude']
|
||||
# 筛选出10到30度纬度范围的数据
|
||||
lat_filter = (lat_df.values >= 0) & (lat_df.values <= 20)
|
||||
# 使用纬度范围过滤数据
|
||||
vmeridional_filtered = vmeridional_df.iloc[:, lat_filter.flatten()]
|
||||
vzonal_filtered = vzonal_df.iloc[:, lat_filter.flatten()]
|
||||
lon_lat_filtered = lon_lat_df.iloc[lat_filter.flatten(), :]
|
||||
# 接着对lon_lat_filtered的经度进行分组,0到360度每30度一个区间
|
||||
bins = range(0, 361, 30)
|
||||
group_labels = [f"{i}-{i + 29}" for i in range(0, 360, 30)]
|
||||
|
||||
lon_lat_filtered['Longitude_Group'] = pd.cut(
|
||||
lon_lat_filtered['Longitude'], bins=bins, labels=group_labels)
|
||||
|
||||
# 获取所有唯一的经度分组标签并按照数值顺序排序
|
||||
unique_groups = sorted(lon_lat_filtered['Longitude_Group'].unique(
|
||||
), key=lambda x: int(x.split('-')[0]))
|
||||
# 按照经度分组获取每个区间对应的vzonal_filtered、vmeridional_filtered数据
|
||||
grouped_data = {}
|
||||
insufficient_data_count = 0 # 用于计数数据不足的组数
|
||||
|
||||
for group in unique_groups:
|
||||
mask = lon_lat_filtered['Longitude_Group'] == group
|
||||
grouped_data[group] = {
|
||||
'vzonal_filtered': vzonal_filtered.loc[:, mask],
|
||||
'vmeridional_filtered': vmeridional_filtered.loc[:, mask],
|
||||
'lon_lat_filtered': lon_lat_filtered.loc[mask]
|
||||
}
|
||||
|
||||
# 计算有效值数量
|
||||
vzonal_count = grouped_data[group]['vzonal_filtered'].notna(
|
||||
).sum().sum()
|
||||
vmeridional_count = grouped_data[group]['vmeridional_filtered'].notna(
|
||||
).sum().sum()
|
||||
|
||||
if vzonal_count <= 20 or vmeridional_count <= 20:
|
||||
insufficient_data_count += 1
|
||||
|
||||
# 如果超过6组数据不足,则抛出错误
|
||||
if insufficient_data_count > 6:
|
||||
expected_length = 21
|
||||
return pd.Series(np.nan, index=range(expected_length))
|
||||
|
||||
# 如果代码运行到这里,说明所有分组的数据量都足够或者不足的组数不超过6
|
||||
print("所有分组的数据量都足够")
|
||||
# -----------计算w0------------------------------------------------------------------------------------------
|
||||
# 定义期望的12个区间的分组标签
|
||||
expected_groups = [f"{i}-{i + 29}" for i in range(0, 360, 30)]
|
||||
# 初始化一个空DataFrame来存储所有区间的均值廓线,列名设置为期望的分组标签
|
||||
W0_profiles_df = pd.DataFrame(columns=expected_groups)
|
||||
|
||||
# 遍历grouped_data字典中的每个组
|
||||
for group, data in grouped_data.items():
|
||||
# 提取当前组的vzonal_filtered数据
|
||||
vzonal_filtered = data['vzonal_filtered']
|
||||
# 计算有效数据的均值廓线,跳过NaN值
|
||||
mean_profile = vzonal_filtered.mean(axis=1, skipna=True)
|
||||
# 将当前组的均值廓线作为一列添加到W0_profiles_df DataFrame中
|
||||
W0_profiles_df[group] = mean_profile
|
||||
|
||||
# 检查并填充缺失的区间列,将缺失的列添加并填充为NaN
|
||||
for group in expected_groups:
|
||||
if group not in W0_profiles_df.columns:
|
||||
W0_profiles_df[group] = pd.Series(
|
||||
[float('NaN')] * len(W0_profiles_df))
|
||||
|
||||
# 打印拼接后的DataFrame以验证
|
||||
print("Concatenated mean profiles for all longitude groups:\n", W0_profiles_df)
|
||||
# 计算每个高度的均值
|
||||
height_mean_profiles = W0_profiles_df.mean(axis=1)
|
||||
# 将每个高度的均值作为新的一行添加到DataFrame中,All_Heights_Mean就是wn0
|
||||
W0_profiles_df['All_Heights_Mean'] = height_mean_profiles
|
||||
wn0_df = W0_profiles_df['All_Heights_Mean']
|
||||
# -------计算残余量--------------------------------------------------------------------------------------
|
||||
# 计算每个经度区间的残余值 (即每个区间的值减去该高度的All_Heights_Mean)
|
||||
residuals_df = W0_profiles_df.drop(columns='All_Heights_Mean').subtract(
|
||||
W0_profiles_df['All_Heights_Mean'], axis=0)
|
||||
|
||||
# --------wn1-------------------------
|
||||
def single_harmonic(x, A, phi, C):
|
||||
return A * np.sin(2 * np.pi / 12 * x + phi) + C
|
||||
# 用于存储每个高度拟合后的参数结果
|
||||
fit_results = []
|
||||
for index, row in residuals_df.iterrows():
|
||||
# 检查该行是否存在NaN值,如果有则跳过拟合,直接将参数设为0
|
||||
if row.isnull().any():
|
||||
fit_results.append((0, 0, 0))
|
||||
continue
|
||||
x = np.arange(12) # 对应12个位置作为自变量
|
||||
y = row.values
|
||||
try:
|
||||
# 进行曲线拟合
|
||||
popt, _ = curve_fit(single_harmonic, x, y)
|
||||
fit_results.append(popt)
|
||||
except RuntimeError:
|
||||
# 如果拟合过程出现问题(例如无法收敛等),也将参数设为0
|
||||
fit_results.append((0, 0, 0))
|
||||
# 将拟合结果转换为DataFrame
|
||||
fit_results_df = pd.DataFrame(fit_results, columns=['A', 'phi', 'C'])
|
||||
print(fit_results_df)
|
||||
# 用于存储每个高度的拟合值
|
||||
wn1_values = []
|
||||
for index, row in fit_results_df.iterrows():
|
||||
A, phi, C = row
|
||||
x = np.arange(12) # 同样对应12个位置作为自变量
|
||||
wn1 = single_harmonic(x, A, phi, C)
|
||||
wn1_values.append(wn1)
|
||||
# 将拟合值转换为DataFrame
|
||||
wn1_df = pd.DataFrame(wn1_values, columns=[
|
||||
f'wn1_{i}' for i in range(12)])
|
||||
print(wn1_df)
|
||||
# 如果wn1_df全为0,则跳过下面的计算,直接令该天的day_log_gwresult全部为NaN
|
||||
if (wn1_df == 0).all().all():
|
||||
return pd.Series(np.nan, index=range(21))
|
||||
# ------------计算temp-wn0-wn1---------------------------------------------------------
|
||||
temp_wn0_wn1 = residuals_df.values - wn1_df.values
|
||||
# 将结果转为 DataFrame
|
||||
temp_wn0_wn1_df = pd.DataFrame(
|
||||
temp_wn0_wn1, columns=residuals_df.columns, index=residuals_df.index)
|
||||
|
||||
# -------wn2--------------------------------------------------------------------------
|
||||
def single_harmonic(x, A, phi, C):
|
||||
return A * np.sin(2 * np.pi / 6 * x + phi) + C
|
||||
|
||||
# 用于存储每个高度拟合后的参数结果
|
||||
fit_results2 = []
|
||||
for index, row in temp_wn0_wn1_df.iterrows():
|
||||
# 检查该行是否存在NaN值,如果有则跳过拟合,直接将参数设为0
|
||||
if row.isnull().any():
|
||||
fit_results2.append((0, 0, 0))
|
||||
continue
|
||||
x = np.arange(12) # 对应12个位置作为自变量
|
||||
y = row.values
|
||||
try:
|
||||
# 进行曲线拟合
|
||||
popt, _ = curve_fit(single_harmonic, x, y)
|
||||
fit_results2.append(popt)
|
||||
except RuntimeError:
|
||||
# 如果拟合过程出现问题(例如无法收敛等),也将参数设为0
|
||||
fit_results2.append((0, 0, 0))
|
||||
# 将拟合结果转换为DataFrame
|
||||
fit_results2_df = pd.DataFrame(fit_results2, columns=['A', 'phi', 'C'])
|
||||
print(fit_results2_df)
|
||||
# 用于存储每个高度的拟合值
|
||||
wn2_values = []
|
||||
for index, row in fit_results2_df.iterrows():
|
||||
A, phi, C = row
|
||||
x = np.arange(12) # 同样对应12个位置作为自变量
|
||||
wn2 = single_harmonic(x, A, phi, C)
|
||||
wn2_values.append(wn2)
|
||||
# 将拟合值转换为DataFrame
|
||||
wn2_df = pd.DataFrame(wn2_values, columns=[
|
||||
f'wn2_{i}' for i in range(12)])
|
||||
print(wn2_df)
|
||||
# ---------计算temp-wn0-wn1-wn2------------------------------------------------------
|
||||
temp_wn0_wn1_wn2 = temp_wn0_wn1_df.values - wn2_df.values
|
||||
# 转换为 DataFrame
|
||||
temp_wn0_wn1_wn2_df = pd.DataFrame(
|
||||
temp_wn0_wn1_wn2, columns=temp_wn0_wn1_df.columns)
|
||||
|
||||
# -------wn3-----------------------------------------------------------------------
|
||||
def single_harmonic(x, A, phi, C):
|
||||
return A * np.sin(2 * np.pi / 4 * x + phi) + C
|
||||
|
||||
# 用于存储每个高度拟合后的参数结果
|
||||
fit_results3 = []
|
||||
for index, row in temp_wn0_wn1_wn2_df.iterrows():
|
||||
# 检查该行是否存在NaN值,如果有则跳过拟合,直接将参数设为0
|
||||
if row.isnull().any():
|
||||
fit_results3.append((0, 0, 0))
|
||||
continue
|
||||
x = np.arange(12) # 对应12个位置作为自变量
|
||||
y = row.values
|
||||
try:
|
||||
# 进行曲线拟合
|
||||
popt, _ = curve_fit(single_harmonic, x, y)
|
||||
fit_results3.append(popt)
|
||||
except RuntimeError:
|
||||
# 如果拟合过程出现问题(例如无法收敛等),也将参数设为0
|
||||
fit_results3.append((0, 0, 0))
|
||||
# 将拟合结果转换为DataFrame
|
||||
fit_results3_df = pd.DataFrame(fit_results3, columns=['A', 'phi', 'C'])
|
||||
print(fit_results3_df)
|
||||
# 用于存储每个高度的拟合值
|
||||
wn3_values = []
|
||||
for index, row in fit_results3_df.iterrows():
|
||||
A, phi, C = row
|
||||
x = np.arange(12) # 同样对应12个位置作为自变量
|
||||
wn3 = single_harmonic(x, A, phi, C)
|
||||
wn3_values.append(wn3)
|
||||
# 将拟合值转换为DataFrame
|
||||
wn3_df = pd.DataFrame(wn3_values, columns=[
|
||||
f'wn3_{i}' for i in range(12)])
|
||||
print(wn3_df)
|
||||
# ---------计算temp-wn0-wn1-wn2-wn3------------------------------------------------------
|
||||
temp_wn0_wn1_wn2_wn3 = temp_wn0_wn1_wn2_df.values - wn3_df.values
|
||||
# 转换为 DataFrame
|
||||
temp_wn0_wn1_wn2_wn3_df = pd.DataFrame(
|
||||
temp_wn0_wn1_wn2_wn3, columns=temp_wn0_wn1_wn2_df.columns)
|
||||
# -------wn4 - ----------------------------------------------------------------------
|
||||
|
||||
def single_harmonic(x, A, phi, C):
|
||||
return A * np.sin(2 * np.pi / 3 * x + phi) + C
|
||||
|
||||
# 用于存储每个高度拟合后的参数结果
|
||||
fit_results4 = []
|
||||
for index, row in temp_wn0_wn1_wn2_wn3_df.iterrows():
|
||||
# 检查该行是否存在NaN值,如果有则跳过拟合,直接将参数设为0
|
||||
if row.isnull().any():
|
||||
fit_results4.append((0, 0, 0))
|
||||
continue
|
||||
x = np.arange(12) # 对应12个位置作为自变量
|
||||
y = row.values
|
||||
try:
|
||||
# 进行曲线拟合
|
||||
popt, _ = curve_fit(single_harmonic, x, y)
|
||||
fit_results4.append(popt)
|
||||
except RuntimeError:
|
||||
# 如果拟合过程出现问题(例如无法收敛等),也将参数设为0
|
||||
fit_results4.append((0, 0, 0))
|
||||
|
||||
fit_results4_df = pd.DataFrame(fit_results4, columns=['A', 'phi', 'C'])
|
||||
print(fit_results4_df)
|
||||
# 用于存储每个高度的拟合值
|
||||
wn4_values = []
|
||||
for index, row in fit_results4_df.iterrows():
|
||||
A, phi, C = row
|
||||
x = np.arange(12) # 同样对应12个位置作为自变量
|
||||
wn4 = single_harmonic(x, A, phi, C)
|
||||
wn4_values.append(wn4)
|
||||
# 将拟合值转换为DataFrame
|
||||
wn4_df = pd.DataFrame(wn4_values, columns=[
|
||||
f'wn4_{i}' for i in range(12)])
|
||||
print(wn4_df)
|
||||
# ---------计算temp-wn0-wn1-wn2-wn3------------------------------------------------------
|
||||
temp_wn0_wn1_wn2_wn3_wn4 = temp_wn0_wn1_wn2_wn3_df.values - wn4_df.values
|
||||
# 转换为 DataFrame
|
||||
temp_wn0_wn1_wn2_wn3_wn4_df = pd.DataFrame(
|
||||
temp_wn0_wn1_wn2_wn3_wn4, columns=temp_wn0_wn1_wn2_wn3_df.columns)
|
||||
|
||||
# -------wn5-----------------------------------------------------------------------
|
||||
def single_harmonic(x, A, phi, C):
|
||||
return A * np.sin(2 * np.pi / 2.4 * x + phi) + C
|
||||
|
||||
# 用于存储每个高度拟合后的参数结果
|
||||
fit_results5 = []
|
||||
for index, row in temp_wn0_wn1_wn2_wn3_wn4_df.iterrows():
|
||||
# 检查该行是否存在NaN值,如果有则跳过拟合,直接将参数设为0
|
||||
if row.isnull().any():
|
||||
fit_results5.append((0, 0, 0))
|
||||
continue
|
||||
x = np.arange(12) # 对应12个位置作为自变量
|
||||
y = row.values
|
||||
try:
|
||||
# 进行曲线拟合
|
||||
popt, _ = curve_fit(single_harmonic, x, y)
|
||||
fit_results5.append(popt)
|
||||
except RuntimeError:
|
||||
# 如果拟合过程出现问题(例如无法收敛等),也将参数设为0
|
||||
fit_results5.append((0, 0, 0))
|
||||
# 将拟合结果转换为DataFrame
|
||||
fit_results5_df = pd.DataFrame(fit_results5, columns=['A', 'phi', 'C'])
|
||||
print(fit_results5_df)
|
||||
# 用于存储每个高度的拟合值
|
||||
wn5_values = []
|
||||
for index, row in fit_results5_df.iterrows():
|
||||
A, phi, C = row
|
||||
x = np.arange(12) # 同样对应12个位置作为自变量
|
||||
wn5 = single_harmonic(x, A, phi, C)
|
||||
wn5_values.append(wn5)
|
||||
# 将拟合值转换为DataFrame
|
||||
wn5_df = pd.DataFrame(wn5_values, columns=[
|
||||
f'wn5_{i}' for i in range(12)])
|
||||
print(wn5_df)
|
||||
# ---------计算temp-wn0-wn1-wn2-wn3------------------------------------------------------
|
||||
temp_wn0_wn1_wn2_wn3_wn4_wn5 = temp_wn0_wn1_wn2_wn3_wn4_df.values - wn5_df.values
|
||||
# 转换为 DataFrame
|
||||
temp_wn0_wn1_wn2_wn3_wn4_wn5_df = pd.DataFrame(temp_wn0_wn1_wn2_wn3_wn4_wn5,
|
||||
columns=temp_wn0_wn1_wn2_wn3_wn4_df.columns)
|
||||
|
||||
        # ------Background = wn0 + wn1 + wn2 + wn3 + wn4 + wn5--------------------------------
        background = (wn5_df.values + wn4_df.values + wn3_df.values
                      + wn2_df.values + wn1_df.values)
        # wn0 is a single column, so it is added separately:
        # skip values that are NaN or 0 so they do not contaminate the sum
        for i in range(21):
            wn0_value = wn0_df.iloc[i]
            # Only add wn0_value to the background when it is neither NaN nor 0
            if not np.isnan(wn0_value) and wn0_value != 0:
                background[i, :] += wn0_value
        # Perturbation
        perturbation = temp_wn0_wn1_wn2_wn3_wn4_wn5_df
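        # By construction, background + perturbation recovers the longitude-sector mean
        # field (except at heights where wn0 was NaN or 0 and was skipped above).
        # `background` is not used again below; only the perturbation is analysed further.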
        # ---------Fourier filtering------------------------------------------------------------
        # New DataFrame to hold the filtered perturbations
        result = pd.DataFrame(
            np.nan, index=perturbation.index, columns=perturbation.columns)
        # Vertical-wavelength band for the filter
        lambda_low = 2    # 2 km
        lambda_high = 15  # 15 km
        f_low = 2 * np.pi / lambda_high
        f_high = 2 * np.pi / lambda_low
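        # The mask below is meant to keep components with vertical wavelengths between
        # lambda_low (2 km) and lambda_high (15 km). Note that f further down is computed
        # in cycles per unit height while f_low/f_high are angular (2*pi/lambda); if a
        # cycles-per-km convention is intended, the bounds would be 1/lambda_high and
        # 1/lambda_low.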

        # Process each column (profile) of the perturbation field
        for col in perturbation.columns:
            x = perturbation[col]
            # Keep only the valid (non-NaN) values
            valid_values = x.dropna()
            N = len(valid_values)  # number of valid samples

            # Index of the first valid value and the corresponding height
            first_valid_index = valid_values.index[0] if not valid_values.index.empty else None
            height_value = height_df.loc[first_valid_index] if first_valid_index is not None else None

            # Skip the column if there are no valid values
            if N == 0 or height_value is None:
                continue

            # Height grid and frequency axis
            dt = 0.25
            n = np.arange(N)
            t = height_value.values + n * dt
            f = n / (N * dt)

            # Fourier transform
            y = np.fft.fft(valid_values.values)

            # Band-pass filter in frequency space
            yy = y.copy()
            freq_filter = (f < f_low) | (f > f_high)
            yy[freq_filter] = 0  # zero out components outside the pass band

            # Inverse Fourier transform
            perturbation_after = np.real(np.fft.ifft(yy))

            # Write the filtered profile back into the result matrix
            result.loc[valid_values.index, col] = perturbation_after
        u2 = result ** 2
        u2 = u2.mean(axis=1)
        return u2
    except FileNotFoundError:
        # If the input files are missing, return an all-NaN Series
        expected_length = 21
        return pd.Series(np.nan, index=range(expected_length))
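# process_vzonal_day(day, year) thus returns one value per height level: the daily mean
# squared band-passed zonal-wind perturbation u'^2 (an all-NaN Series of length 21 when
# the input files are missing). Illustrative call only:
#   u2_profile = process_vzonal_day(100, 2019)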

# ---------------------------------------------------------------------------------------------
# --------meridional---------------------------------------------------------------------------
def process_vmeridional_day(day, year):
    try:
        # Read the daily data files
        height_data = loadmat(rf"./tidi/data\{year}\{day:03d}_Height.mat")
        lat_data = loadmat(rf"./tidi/data\{year}\{day:03d}_Lat.mat")
        lon_data = loadmat(rf"./tidi/data\{year}\{day:03d}_Lon.mat")
        vmeridional_data = loadmat(rf"./tidi/data\{year}\{day:03d}_VMerdional.mat")
        vzonal_data = loadmat(rf"./tidi/data\{year}\{day:03d}_Vzonal.mat")

        # Convert the arrays to DataFrames
        height_df = pd.DataFrame(height_data['Height'])
        lat_df = pd.DataFrame(lat_data['Lat'])
        lon_df = pd.DataFrame(lon_data['Lon'])
        vmeridional_df = pd.DataFrame(vmeridional_data['VMerdional'])
        vzonal_df = pd.DataFrame(vzonal_data['Vzonal'])
        # Combine longitude and latitude into a two-column DataFrame
        lon_lat_df = pd.concat([lon_df, lat_df], axis=1)
        lon_lat_df.columns = ['Longitude', 'Latitude']
        # Keep only profiles in the 0-20 degree latitude band
        lat_filter = (lat_df.values >= 0) & (lat_df.values <= 20)
        # Apply the latitude filter
        vmeridional_filtered = vmeridional_df.iloc[:, lat_filter.flatten()]
        vzonal_filtered = vzonal_df.iloc[:, lat_filter.flatten()]
        lon_lat_filtered = lon_lat_df.iloc[lat_filter.flatten(), :].copy()  # .copy() avoids chained-assignment warnings below
        # Bin the longitudes into 30-degree sectors covering 0-360 degrees
        bins = range(0, 361, 30)
        group_labels = [f"{i}-{i + 29}" for i in range(0, 360, 30)]

        lon_lat_filtered['Longitude_Group'] = pd.cut(
            lon_lat_filtered['Longitude'], bins=bins, labels=group_labels)

        # All longitude-group labels present, sorted by their numeric lower bound
        unique_groups = sorted(lon_lat_filtered['Longitude_Group'].unique(),
                               key=lambda x: int(x.split('-')[0]))
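        # Apparent data layout: each daily .mat file holds one array keyed by its suffix
        # ('Height', 'Lat', 'Lon', 'VMerdional', 'Vzonal'), with one column per profile.
        # After the 0-20 degree latitude cut, the profiles are grouped into twelve
        # 30-degree longitude sectors for the wavenumber analysis below.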
        # For each longitude sector collect the corresponding vzonal/vmeridional profiles
        grouped_data = {}
        insufficient_data_count = 0  # number of sectors with too little data

        for group in unique_groups:
            mask = lon_lat_filtered['Longitude_Group'] == group
            grouped_data[group] = {
                'vzonal_filtered': vzonal_filtered.loc[:, mask],
                'vmeridional_filtered': vmeridional_filtered.loc[:, mask],
                'lon_lat_filtered': lon_lat_filtered.loc[mask]
            }

            # Count the valid (non-NaN) samples in this sector
            vzonal_count = grouped_data[group]['vzonal_filtered'].notna().sum().sum()
            vmeridional_count = grouped_data[group]['vmeridional_filtered'].notna().sum().sum()

            if vzonal_count <= 20 or vmeridional_count <= 20:
                insufficient_data_count += 1

        # If more than 6 sectors have too little data, give up on this day
        if insufficient_data_count > 6:
            expected_length = 21
            return pd.Series(np.nan, index=range(expected_length))

        # Reaching this point means every sector has enough data, or at most 6 do not
        print("All longitude sectors have enough data")
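        # Each sector is next reduced to a single mean profile (NaN-aware mean over its
        # profiles); entirely missing sectors are filled with NaN so there are always
        # twelve columns. The mean over the twelve sectors at each height is the zonal
        # mean, wn0.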
        # -----------Compute w0 (sector-mean profiles)------------------------------------------
        # The twelve expected 30-degree longitude sector labels
        expected_groups = [f"{i}-{i + 29}" for i in range(0, 360, 30)]
        # DataFrame that will hold one mean profile per sector
        W0_profiles_df = pd.DataFrame(columns=expected_groups)

        # Loop over the sectors collected in grouped_data
        for group, data in grouped_data.items():
            # Meridional wind profiles of the current sector
            vmeridional_filtered = data['vmeridional_filtered']
            # Mean profile over the sector, skipping NaN values
            mean_profile = vmeridional_filtered.mean(axis=1, skipna=True)
            # Add the sector-mean profile as a new column
            W0_profiles_df[group] = mean_profile

        # Add any missing sector columns and fill them with NaN
        for group in expected_groups:
            if group not in W0_profiles_df.columns:
                W0_profiles_df[group] = pd.Series([float('NaN')] * len(W0_profiles_df))

        # Print the combined DataFrame as a check
        print("Concatenated mean profiles for all longitude groups:\n", W0_profiles_df)
        # Mean over the twelve sectors at each height
        height_mean_profiles = W0_profiles_df.mean(axis=1)
        # Add the per-height mean as a new column; All_Heights_Mean is wn0
        W0_profiles_df['All_Heights_Mean'] = height_mean_profiles
        wn0_df = W0_profiles_df['All_Heights_Mean']
        # -------Residuals---------------------------------------------------------------------
        # Residual of each sector = sector value minus All_Heights_Mean at that height
        residuals_df = W0_profiles_df.drop(columns='All_Heights_Mean').subtract(
            W0_profiles_df['All_Heights_Mean'], axis=0)
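        # The fits below remove zonal wavenumbers 1-5 from the residuals one after another:
        # a sinusoid with a period of 12/k sectors (12, 6, 4, 3 and 2.4 for k = 1..5)
        # corresponds to wavenumber k around 360 degrees of longitude sampled in 30-degree
        # bins. Illustrative only (not used below): curve_fit convergence can be helped
        # with an explicit initial guess, e.g.
        #   popt, _ = curve_fit(single_harmonic, x, y, p0=[y.std(), 0.0, y.mean()])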
        # --------wn1------------------------------------------------------------------------
        def single_harmonic(x, A, phi, C):
            return A * np.sin(2 * np.pi / 12 * x + phi) + C

        # Fitted parameters for every height level
        fit_results = []
        for index, row in residuals_df.iterrows():
            # If the row contains NaN, skip the fit and set the parameters to 0
            if row.isnull().any():
                fit_results.append((0, 0, 0))
                continue
            x = np.arange(12)  # the 12 longitude sectors are the independent variable
            y = row.values
            try:
                # Fit the single harmonic
                popt, _ = curve_fit(single_harmonic, x, y)
                fit_results.append(popt)
            except RuntimeError:
                # If the fit fails (e.g. it does not converge), set the parameters to 0
                fit_results.append((0, 0, 0))
        # Convert the fit results to a DataFrame
        fit_results_df = pd.DataFrame(fit_results, columns=['A', 'phi', 'C'])
        print(fit_results_df)
        # Fitted values for every height level
        wn1_values = []
        for index, row in fit_results_df.iterrows():
            A, phi, C = row
            x = np.arange(12)  # again the 12 longitude sectors
            wn1 = single_harmonic(x, A, phi, C)
            wn1_values.append(wn1)
        # Convert the fitted values to a DataFrame
        wn1_df = pd.DataFrame(wn1_values, columns=[f'wn1_{i}' for i in range(12)])
        print(wn1_df)
        # If wn1_df is all zeros, skip the rest and return an all-NaN profile for this day
        if (wn1_df == 0).all().all():
            return pd.Series(np.nan, index=range(21))
        # ------------Compute temp - wn0 - wn1-------------------------------------------------
        temp_wn0_wn1 = residuals_df.values - wn1_df.values
        # Convert to a DataFrame
        temp_wn0_wn1_df = pd.DataFrame(
            temp_wn0_wn1, columns=residuals_df.columns, index=residuals_df.index)

        # -------wn2----------------------------------------------------------------------
        def single_harmonic(x, A, phi, C):
            return A * np.sin(2 * np.pi / 6 * x + phi) + C

        # Fitted parameters for every height level
        fit_results2 = []
        for index, row in temp_wn0_wn1_df.iterrows():
            # If the row contains NaN, skip the fit and set the parameters to 0
            if row.isnull().any():
                fit_results2.append((0, 0, 0))
                continue
            x = np.arange(12)  # the 12 longitude sectors are the independent variable
            y = row.values
            try:
                # Fit the single harmonic
                popt, _ = curve_fit(single_harmonic, x, y)
                fit_results2.append(popt)
            except RuntimeError:
                # If the fit fails (e.g. it does not converge), set the parameters to 0
                fit_results2.append((0, 0, 0))
        # Convert the fit results to a DataFrame
        fit_results2_df = pd.DataFrame(fit_results2, columns=['A', 'phi', 'C'])
        print(fit_results2_df)
        # Fitted values for every height level
        wn2_values = []
        for index, row in fit_results2_df.iterrows():
            A, phi, C = row
            x = np.arange(12)  # again the 12 longitude sectors
            wn2 = single_harmonic(x, A, phi, C)
            wn2_values.append(wn2)
        # Convert the fitted values to a DataFrame
        wn2_df = pd.DataFrame(wn2_values, columns=[f'wn2_{i}' for i in range(12)])
        print(wn2_df)
        # ---------Compute temp - wn0 - wn1 - wn2-----------------------------------------------
        temp_wn0_wn1_wn2 = temp_wn0_wn1_df.values - wn2_df.values
        # Convert to a DataFrame
        temp_wn0_wn1_wn2_df = pd.DataFrame(
            temp_wn0_wn1_wn2, columns=temp_wn0_wn1_df.columns)

        # -------wn3----------------------------------------------------------------------
        def single_harmonic(x, A, phi, C):
            return A * np.sin(2 * np.pi / 4 * x + phi) + C

        # Fitted parameters for every height level
        fit_results3 = []
        for index, row in temp_wn0_wn1_wn2_df.iterrows():
            # If the row contains NaN, skip the fit and set the parameters to 0
            if row.isnull().any():
                fit_results3.append((0, 0, 0))
                continue
            x = np.arange(12)  # the 12 longitude sectors are the independent variable
            y = row.values
            try:
                # Fit the single harmonic
                popt, _ = curve_fit(single_harmonic, x, y)
                fit_results3.append(popt)
            except RuntimeError:
                # If the fit fails (e.g. it does not converge), set the parameters to 0
                fit_results3.append((0, 0, 0))
        # Convert the fit results to a DataFrame
        fit_results3_df = pd.DataFrame(fit_results3, columns=['A', 'phi', 'C'])
        print(fit_results3_df)
        # Fitted values for every height level
        wn3_values = []
        for index, row in fit_results3_df.iterrows():
            A, phi, C = row
            x = np.arange(12)  # again the 12 longitude sectors
            wn3 = single_harmonic(x, A, phi, C)
            wn3_values.append(wn3)
        # Convert the fitted values to a DataFrame
        wn3_df = pd.DataFrame(wn3_values, columns=[f'wn3_{i}' for i in range(12)])
        print(wn3_df)
        # ---------Compute temp - wn0 - wn1 - wn2 - wn3-------------------------------------------
        temp_wn0_wn1_wn2_wn3 = temp_wn0_wn1_wn2_df.values - wn3_df.values
        # Convert to a DataFrame
        temp_wn0_wn1_wn2_wn3_df = pd.DataFrame(
            temp_wn0_wn1_wn2_wn3, columns=temp_wn0_wn1_wn2_df.columns)
        # -------wn4----------------------------------------------------------------------
        def single_harmonic(x, A, phi, C):
            return A * np.sin(2 * np.pi / 3 * x + phi) + C

        # Fitted parameters for every height level
        fit_results4 = []
        for index, row in temp_wn0_wn1_wn2_wn3_df.iterrows():
            # If the row contains NaN, skip the fit and set the parameters to 0
            if row.isnull().any():
                fit_results4.append((0, 0, 0))
                continue
            x = np.arange(12)  # the 12 longitude sectors are the independent variable
            y = row.values
            try:
                # Fit the single harmonic
                popt, _ = curve_fit(single_harmonic, x, y)
                fit_results4.append(popt)
            except RuntimeError:
                # If the fit fails (e.g. it does not converge), set the parameters to 0
                fit_results4.append((0, 0, 0))

        fit_results4_df = pd.DataFrame(fit_results4, columns=['A', 'phi', 'C'])
        print(fit_results4_df)
        # Fitted values for every height level
        wn4_values = []
        for index, row in fit_results4_df.iterrows():
            A, phi, C = row
            x = np.arange(12)  # again the 12 longitude sectors
            wn4 = single_harmonic(x, A, phi, C)
            wn4_values.append(wn4)
        # Convert the fitted values to a DataFrame
        wn4_df = pd.DataFrame(wn4_values, columns=[f'wn4_{i}' for i in range(12)])
        print(wn4_df)
        # ---------Compute temp - wn0 - wn1 - wn2 - wn3 - wn4---------------------------------
        temp_wn0_wn1_wn2_wn3_wn4 = temp_wn0_wn1_wn2_wn3_df.values - wn4_df.values
        # Convert to a DataFrame
        temp_wn0_wn1_wn2_wn3_wn4_df = pd.DataFrame(
            temp_wn0_wn1_wn2_wn3_wn4, columns=temp_wn0_wn1_wn2_wn3_df.columns)

        # -------wn5----------------------------------------------------------------------
        def single_harmonic(x, A, phi, C):
            return A * np.sin(2 * np.pi / 2.4 * x + phi) + C

        # Fitted parameters for every height level
        fit_results5 = []
        for index, row in temp_wn0_wn1_wn2_wn3_wn4_df.iterrows():
            # If the row contains NaN, skip the fit and set the parameters to 0
            if row.isnull().any():
                fit_results5.append((0, 0, 0))
                continue
            x = np.arange(12)  # the 12 longitude sectors are the independent variable
            y = row.values
            try:
                # Fit the single harmonic
                popt, _ = curve_fit(single_harmonic, x, y)
                fit_results5.append(popt)
            except RuntimeError:
                # If the fit fails (e.g. it does not converge), set the parameters to 0
                fit_results5.append((0, 0, 0))
        # Convert the fit results to a DataFrame
        fit_results5_df = pd.DataFrame(fit_results5, columns=['A', 'phi', 'C'])
        print(fit_results5_df)
        # Fitted values for every height level
        wn5_values = []
        for index, row in fit_results5_df.iterrows():
            A, phi, C = row
            x = np.arange(12)  # again the 12 longitude sectors
            wn5 = single_harmonic(x, A, phi, C)
            wn5_values.append(wn5)
        # Convert the fitted values to a DataFrame
        wn5_df = pd.DataFrame(wn5_values, columns=[f'wn5_{i}' for i in range(12)])
        print(wn5_df)
        # ---------Compute temp - wn0 - wn1 - wn2 - wn3 - wn4 - wn5---------------------------
        temp_wn0_wn1_wn2_wn3_wn4_wn5 = temp_wn0_wn1_wn2_wn3_wn4_df.values - wn5_df.values
        # Convert to a DataFrame
        temp_wn0_wn1_wn2_wn3_wn4_wn5_df = pd.DataFrame(temp_wn0_wn1_wn2_wn3_wn4_wn5,
                                                       columns=temp_wn0_wn1_wn2_wn3_wn4_df.columns)

        # ------Background = wn0 + wn1 + wn2 + wn3 + wn4 + wn5--------------------------------
        background = (wn5_df.values + wn4_df.values + wn3_df.values
                      + wn2_df.values + wn1_df.values)
        # wn0 is a single column, so it is added separately:
        # skip values that are NaN or 0 so they do not contaminate the sum
        for i in range(21):
            wn0_value = wn0_df.iloc[i]
            # Only add wn0_value to the background when it is neither NaN nor 0
            if not np.isnan(wn0_value) and wn0_value != 0:
                background[i, :] += wn0_value
        # Perturbation
        perturbation = temp_wn0_wn1_wn2_wn3_wn4_wn5_df
        # ---------Fourier filtering------------------------------------------------------------
        # New DataFrame to hold the filtered perturbations
        result = pd.DataFrame(
            np.nan, index=perturbation.index, columns=perturbation.columns)
        # Vertical-wavelength band for the filter
        lambda_low = 2    # 2 km
        lambda_high = 15  # 15 km
        f_low = 2 * np.pi / lambda_high
        f_high = 2 * np.pi / lambda_low

        # Process each column (profile) of the perturbation field
        for col in perturbation.columns:
            x = perturbation[col]
            # Keep only the valid (non-NaN) values
            valid_values = x.dropna()
            N = len(valid_values)  # number of valid samples

            # Index of the first valid value and the corresponding height
            first_valid_index = valid_values.index[0] if not valid_values.index.empty else None
            height_value = height_df.loc[first_valid_index] if first_valid_index is not None else None

            # Skip the column if there are no valid values
            if N == 0 or height_value is None:
                continue

            # Height grid and frequency axis
            dt = 0.25
            n = np.arange(N)
            t = height_value.values + n * dt
            f = n / (N * dt)

            # Fourier transform
            y = np.fft.fft(valid_values.values)

            # Band-pass filter in frequency space
            yy = y.copy()
            freq_filter = (f < f_low) | (f > f_high)
            yy[freq_filter] = 0  # zero out components outside the pass band

            # Inverse Fourier transform
            perturbation_after = np.real(np.fft.ifft(yy))

            # Write the filtered profile back into the result matrix
            result.loc[valid_values.index, col] = perturbation_after
        v2 = result ** 2
        v2 = v2.mean(axis=1)
        return v2
    except FileNotFoundError:
        # If the input files are missing, return an all-NaN Series
        expected_length = 21
        return pd.Series(np.nan, index=range(expected_length))


# DataFrame to collect the daily zonal-wind results
all_days_vzonal_results = pd.DataFrame()

# Loop over the days of the year (here days 1-364)
for day in range(1, 365):
    u2 = process_vzonal_day(day, 2019)
    if u2 is not None:
        all_days_vzonal_results[rf"{day:02d}"] = u2

# Columns are the day numbers
# all_days_vzonal_results.columns = [f"{day:02d}" for day in range(1, 365)]

# DataFrame to collect the daily meridional-wind results
all_days_vmeridional_results = pd.DataFrame()

# Loop over the days of the year (here days 1-364)
for day in range(1, 365):
    v2 = process_vmeridional_day(day, 2019)
    if v2 is not None:
        all_days_vmeridional_results[rf"{day:02d}"] = v2

# Columns are the day numbers
# all_days_vmeridional_results.columns = [f"{day:02d}" for day in range(1, 365)]

# ---------------------------------------------------------------------------------------------------
# --------Kinetic energy from the squared zonal and meridional wind perturbations---------------------

# Use numpy.where so the sum is NaN wherever either component is NaN
# (sum_df is kept for reference; the plots below use HP)
sum_df = np.where(
    pd.notna(all_days_vmeridional_results) & pd.notna(all_days_vzonal_results),
    all_days_vmeridional_results + all_days_vzonal_results,
    np.nan
)
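# HP below is the horizontal kinetic energy per unit mass of the filtered perturbations,
# E_K = (u'^2 + v'^2) / 2, in m^2 s^-2 if the winds are given in m/s.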
HP = 0.5 * all_days_vmeridional_results + 0.5 * all_days_vzonal_results
heights = [70.0, 72.5, 75.0, 77.5, 80.0, 82.5, 85.0, 87.5, 90.0, 92.5, 95.0,
           97.5, 100.0, 102.5, 105.0, 107.5, 110.0, 112.5, 115.0, 117.5, 120.0]
HP.index = heights
# # Save the DataFrame to an Excel file
# HP.to_excel('HP_data.xlsx')
# ----------Yearly overview plot---------------------------------------------------------------------
data = HP
# Move the height index into the first column
data = data.reset_index()
h = data.iloc[:, 0].copy()  # heights, kept for the y axis
dates = list(range(1, data.shape[1]))  # day numbers, used for the x axis
data0 = data.iloc[:, 1:].copy()  # data to plot
'''Data preparation'''
# Reverse h so that height increases from bottom to top
h_reversed = h[::-1].reset_index(drop=True)
data0_reversed = data0[::-1].reset_index(drop=True)
# Replace values larger than 20 with NaN
data0_reversed[data0_reversed > 20] = float('nan')
# Convert day-of-year to English month labels (365-day year)
days_in_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]


def day_to_month(day):
    # Walk through the cumulative month lengths until the day falls inside a month
    cumulative_days = 0
    for i, days in enumerate(days_in_month):
        cumulative_days += days
        if day <= cumulative_days:
            return ["January", "February", "March", "April", "May", "June", "July",
                    "August", "September", "October", "November", "December"][i]
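# Example (illustrative): day_to_month(32) returns "February"; days past 365 return None.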
months = [day_to_month(day) for day in dates]


'''Plotting'''
plt.rcParams['font.family'] = 'SimHei'  # SimHei so Chinese labels render correctly
plt.rcParams['font.size'] = 12  # base font size
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly
plt.rcParams['font.sans-serif'] = 'Times New Roman'  # Times New Roman for Latin text
plt.rcParams['axes.labelsize'] = 14  # axis label size
plt.rcParams['xtick.labelsize'] = 12  # x tick label size
plt.rcParams['ytick.labelsize'] = 12  # y tick label size
plt.rcParams['legend.fontsize'] = 16  # legend font size
plt.figure(figsize=(10, 6))  # figure size
# Heat map with height on the y axis and month on the x axis
sns.heatmap(data0_reversed, annot=False, cmap='YlGnBu', linewidths=0.5,
            yticklabels=h_reversed, xticklabels=months,
            cbar_kws={'label': 'Gravitational potential energy'})

# The x axis is long, so only show every `interval`-th label
interval = 34  # tick spacing
plt.xticks(ticks=range(0, len(dates), interval),
           labels=months[::interval], rotation=45)  # rotation is optional

# Axis labels
plt.xlabel('Month')
plt.ylabel('Height')

# Show the figure
plt.show()
# --------------Monthly statistics plot-------------------------------------------------------
# Number of daily columns in HP
num_cols = HP.shape[1]
# Mean columns computed over consecutive 30-day blocks
mean_cols = []
start = 0
while start < num_cols:
    end = start + 30
    if end > num_cols:
        end = num_cols
    # Take the next 30 columns (or whatever remains)
    subset = HP.iloc[:, start:end]
    # The row-wise mean of this block becomes one "monthly" column
    mean_series = subset.mean(axis=1)
    mean_cols.append(mean_series)
    start = end
# Combine the block means into one DataFrame
result_df = pd.concat(mean_cols, axis=1)
# Take the natural logarithm of every element
result_df_log = result_df.applymap(lambda x: np.log(x))
# Drop the first (70 km) row
result_df_log.drop(70, axis=0, inplace=True)
# Mean over heights for each ~monthly block; keep the first 12 blocks so the values
# line up with the 12 month labels below (any final partial block is ignored)
monthly_average = result_df_log.mean(axis=0)
monthly_average = monthly_average.values[:12]


# Month labels for the x axis
months = ["Jan", "Feb", "Mar", "Apr", "May", "Jun",
          "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]


# Line plot of the monthly mean
plt.plot(months, monthly_average, marker='o', linestyle='-', color='b')

# Title and axis labels
plt.title("Monthly Average Energy (log)")
plt.xlabel("Month")
plt.ylabel("Average Energy")
# Make the month labels easier to read
plt.xticks(rotation=45)
plt.grid(True)
plt.tight_layout()
# Show the figure
plt.show()
@@ -15,7 +15,7 @@ HEIGHTS_CONSTANT = [
]


def tidi_process_idk(
def extract_tidi_pw_data(
    year,
    input_height: float = 82.5,
    lat_range: tuple = (0, 5),