2.2 Computing Fitness
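
The fitness of each wolf is simply the value of the objective function at its current position. After every evaluation round, the best wolf found so far is recorded as alpha, the second best as beta, and the third best as delta; these three leaders guide the next position update (this matches the implementation in Section 3).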

2.3 Position Update

The positions of the remaining grey wolves are updated according to the positions of the alpha, beta, and delta wolves. The update formulas are as follows:

2.3.1 Computing the Distance Vectors
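
For a wolf at position $\vec{X}$, the distance vectors to the three leaders are (the standard GWO equations, consistent with the implementation in Section 3):

$$\vec{D}_\alpha = \left|\vec{C}_1 \cdot \vec{X}_\alpha - \vec{X}\right|, \quad \vec{D}_\beta = \left|\vec{C}_2 \cdot \vec{X}_\beta - \vec{X}\right|, \quad \vec{D}_\delta = \left|\vec{C}_3 \cdot \vec{X}_\delta - \vec{X}\right|$$

where $\vec{C}_k = 2\vec{r}_2$ and $\vec{r}_2$ is drawn uniformly from $[0, 1]$.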

2.3.2 Computing the Candidate Position Vectors
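
Each leader then proposes a candidate position for the wolf:

$$\vec{X}_1 = \vec{X}_\alpha - \vec{A}_1 \cdot \vec{D}_\alpha, \quad \vec{X}_2 = \vec{X}_\beta - \vec{A}_2 \cdot \vec{D}_\beta, \quad \vec{X}_3 = \vec{X}_\delta - \vec{A}_3 \cdot \vec{D}_\delta$$

where $\vec{A}_k = 2a\vec{r}_1 - a$, $\vec{r}_1$ is drawn uniformly from $[0, 1]$, and the control parameter $a$ decreases linearly from 2 to 0 over the iterations.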

2.3.3 Updating the Wolf Positions
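
The wolf finally moves to the average of the three candidate positions:

$$\vec{X}(t+1) = \frac{\vec{X}_1 + \vec{X}_2 + \vec{X}_3}{3}$$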

1.2 Flowchart

The two figures show, respectively, the flowchart of the Grey Wolf Optimizer and the grey wolf dominance hierarchy (dominance decreasing from top to bottom). By simulating the behavior of the leader wolves and the rest of the pack during a hunt, the GWO uses the leaders' positions to guide the other wolves step by step toward the prey (the global optimum), thereby performing global optimization. Its core is the position-update formula, which drives the population ever closer to the optimal solution.

2. Advantages and Disadvantages of the Grey Wolf Algorithm

Advantages: compared with other optimization algorithms, the GWO typically converges quickly: it first generates candidate solutions, then compares and ranks them, and outputs the best one found.

Disadvantages: the GWO is a heuristic optimization algorithm, so the solution it produces only approximates the true optimum; it is not guaranteed to be the exact optimal solution of the problem.

3. Implementing the Grey Wolf Algorithm

3.1 Finding the Extremum of a Simple Function with the Grey Wolf Algorithm

import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = 'SimHei'
plt.rcParams['axes.unicode_minus'] = False

# Objective function to minimize
def objective_function(x):
    return np.sum(x ** 2 + x)

# GWO parameters
dim = 2           # dimensionality of the solution
num_wolves = 300  # population size
max_iter = 100    # maximum number of iterations

# Initialize the wolf population positions
wolves = np.random.uniform(-10, 10, (num_wolves, dim))

# Initialize the alpha, beta, and delta wolves
alpha_pos = np.zeros(dim)
beta_pos = np.zeros(dim)
delta_pos = np.zeros(dim)
alpha_score = float("inf")
beta_score = float("inf")
delta_score = float("inf")

# Main iteration loop
a = 2  # control parameter, decreases over the iterations
convergence_curve = []

for t in range(max_iter):
    # Evaluate fitness and update the three leaders
    for i in range(num_wolves):
        fitness = objective_function(wolves[i])

        if fitness < alpha_score:
            alpha_score = fitness
            alpha_pos = wolves[i].copy()
        elif fitness < beta_score:
            beta_score = fitness
            beta_pos = wolves[i].copy()
        elif fitness < delta_score:
            delta_score = fitness
            delta_pos = wolves[i].copy()

    a = 2 - t * (2 / max_iter)  # linearly decrease a from 2 to 0

    # Move every wolf toward the average of the leaders' proposals
    for i in range(num_wolves):
        for j in range(dim):
            r1, r2 = np.random.rand(), np.random.rand()
            A1 = 2 * a * r1 - a
            C1 = 2 * r2
            D_alpha = abs(C1 * alpha_pos[j] - wolves[i][j])
            X1 = alpha_pos[j] - A1 * D_alpha

            r1, r2 = np.random.rand(), np.random.rand()
            A2 = 2 * a * r1 - a
            C2 = 2 * r2
            D_beta = abs(C2 * beta_pos[j] - wolves[i][j])
            X2 = beta_pos[j] - A2 * D_beta

            r1, r2 = np.random.rand(), np.random.rand()
            A3 = 2 * a * r1 - a
            C3 = 2 * r2
            D_delta = abs(C3 * delta_pos[j] - wolves[i][j])
            X3 = delta_pos[j] - A3 * D_delta

            wolves[i][j] = (X1 + X2 + X3) / 3

    convergence_curve.append(alpha_score)

print(f"Best solution: {alpha_pos}")
print(f"Best value: {alpha_score}")

# Plot the convergence curve
plt.figure(figsize=(15, 5))
plt.plot(convergence_curve)
plt.title("Convergence Curve")
plt.xlabel("Iteration")
plt.ylabel("Fitness")
plt.show()

# Visualize the final wolf population
if dim == 2:
    plt.figure(figsize=(15, 5))
    plt.scatter(wolves[:, 0], wolves[:, 1], c='blue', label='Wolves')
    plt.scatter(alpha_pos[0], alpha_pos[1], c='red', label='Alpha', marker='x')
    plt.scatter(beta_pos[0], beta_pos[1], c='green', label='Beta', marker='x')
    plt.scatter(delta_pos[0], delta_pos[1], c='purple', label='Delta', marker='x')
    plt.title("Wolf Population Positions")
    plt.legend()
    plt.show()
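
As a sanity check, the minimum of this objective can be derived analytically: each coordinate term $x^2 + x$ is minimized at $x = -0.5$ with value $-0.25$, so for dim = 2 the global minimum is $-0.5$ at $(-0.5, -0.5)$. The reported alpha_pos and alpha_score should converge toward these values.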

3.2 Finding the Extremum of the Standard Rastrigin Test Function with the Grey Wolf Algorithm

import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

plt.rcParams['font.sans-serif'] = 'SimHei'
plt.rcParams['axes.unicode_minus'] = False

# Objective function: the Rastrigin benchmark
def objective_function(x):
    return 10 * len(x) + np.sum(x ** 2 - 10 * np.cos(2 * np.pi * x))

# GWO parameters
dim = 2           # dimensionality of the solution
num_wolves = 300  # population size
max_iter = 100    # maximum number of iterations

# Initialize the wolf population within the Rastrigin domain [-5.12, 5.12]
wolves = np.random.uniform(-5.12, 5.12, (num_wolves, dim))

# Initialize the alpha, beta, and delta wolves
alpha_pos = np.zeros(dim)
beta_pos = np.zeros(dim)
delta_pos = np.zeros(dim)
alpha_score = float("inf")
beta_score = float("inf")
delta_score = float("inf")

# Main iteration loop
a = 2  # control parameter, decreases over the iterations
convergence_curve = []

for t in range(max_iter):
    # Evaluate fitness and update the three leaders
    for i in range(num_wolves):
        fitness = objective_function(wolves[i])

        if fitness < alpha_score:
            alpha_score = fitness
            alpha_pos = wolves[i].copy()
        elif fitness < beta_score:
            beta_score = fitness
            beta_pos = wolves[i].copy()
        elif fitness < delta_score:
            delta_score = fitness
            delta_pos = wolves[i].copy()

    a = 2 - t * (2 / max_iter)  # linearly decrease a from 2 to 0

    # Move every wolf toward the average of the leaders' proposals
    for i in range(num_wolves):
        for j in range(dim):
            r1, r2 = np.random.rand(), np.random.rand()
            A1 = 2 * a * r1 - a
            C1 = 2 * r2
            D_alpha = abs(C1 * alpha_pos[j] - wolves[i][j])
            X1 = alpha_pos[j] - A1 * D_alpha

            r1, r2 = np.random.rand(), np.random.rand()
            A2 = 2 * a * r1 - a
            C2 = 2 * r2
            D_beta = abs(C2 * beta_pos[j] - wolves[i][j])
            X2 = beta_pos[j] - A2 * D_beta

            r1, r2 = np.random.rand(), np.random.rand()
            A3 = 2 * a * r1 - a
            C3 = 2 * r2
            D_delta = abs(C3 * delta_pos[j] - wolves[i][j])
            X3 = delta_pos[j] - A3 * D_delta

            wolves[i][j] = (X1 + X2 + X3) / 3

    convergence_curve.append(alpha_score)

print(f"Best solution: {alpha_pos}")
print(f"Best value: {alpha_score}")

# Plot the convergence curve
plt.figure(figsize=(15, 5))
plt.plot(convergence_curve)
plt.title("Convergence Curve")
plt.xlabel("Iteration")
plt.ylabel("Fitness")
plt.show()

# Plot the Rastrigin surface and the three leader wolves
if dim == 2:
    x = np.linspace(-5.12, 5.12, 400)
    y = np.linspace(-5.12, 5.12, 400)
    X, Y = np.meshgrid(x, y)
    Z = 10 * 2 + (X ** 2 - 10 * np.cos(2 * np.pi * X)) + (Y ** 2 - 10 * np.cos(2 * np.pi * Y))
    fig = plt.figure(figsize=(8, 8))
    ax = fig.add_subplot(111, projection='3d')
    ax.plot_surface(X, Y, Z, cmap='viridis', alpha=0.2)
    ax.scatter(alpha_pos[0], alpha_pos[1], alpha_score, c='red', label='Alpha', marker='x')
    ax.scatter(beta_pos[0], beta_pos[1], beta_score, c='green', label='Beta', marker='x')
    ax.scatter(delta_pos[0], delta_pos[1], delta_score, c='purple', label='Delta', marker='x')
    ax.set_title("Rastrigin Optimization Result")
    ax.legend()
    plt.show()

The Rastrigin function is a multimodal function commonly used to test the performance of optimization algorithms. Its mathematical expression is

$$f(\vec{x}) = 10n + \sum_{i=1}^{n}\left[x_i^2 - 10\cos(2\pi x_i)\right]$$

where $n$ is the dimensionality. Its many regularly spaced local minima make it easy for optimizers to get trapped; the global minimum is $f(\vec{0}) = 0$ at the origin.
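
A quick check, not in the original, that the implementation above attains this minimum at the origin:

import numpy as np

def objective_function(x):
    """Rastrigin function (same as in Section 3.2)."""
    return 10 * len(x) + np.sum(x ** 2 - 10 * np.cos(2 * np.pi * x))

print(objective_function(np.zeros(2)))  # -> 0.0, the known global minimum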

4. Applying the Grey Wolf Algorithm to Model Optimization

4.1 Simple Data Preprocessing

import pandas as pd
import numpy as np

# Load the data; the index column holds the timestamps
df = pd.read_excel('數(shù)據(jù).xlsx', index_col=0, parse_dates=['數(shù)據(jù)時(shí)間'])

# Min-max normalization of the total active power column ('總有功功率(kw)')
df_max = np.max(df['總有功功率(kw)'])
df_min = np.min(df['總有功功率(kw)'])
df_bz = (df['總有功功率(kw)'] - df_min) / (df_max - df_min)

# Build sliding-window samples: each window of win_size points predicts the next point
def prepare_data(data, win_size):
    X = []
    y = []
    for i in range(len(data) - win_size):
        temp_x = data[i:i + win_size]
        temp_y = data[i + win_size]
        X.append(temp_x)
        y.append(temp_y)
    X = np.asarray(X)
    y = np.asarray(y)
    X = np.expand_dims(X, axis=-1)  # add a channel dimension for the TCN

    return X, y

win_size = 12
X, y = prepare_data(df_bz.values, win_size)

# 70/30 chronological train/test split
train_size = int(len(X) * 0.7)
X_train, X_test = X[:train_size], X[train_size:]
y_train, y_test = y[:train_size], y[train_size:]
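
Since the model will be trained on these scaled values, its predictions will also lie on the [0, 1] scale. A small helper, not in the original, for mapping values back to the original unit (kW):

# Invert the min-max scaling defined above (hypothetical helper)
def inverse_scale(v, vmin=df_min, vmax=df_max):
    return v * (vmax - vmin) + vmin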

4.2 Finding the Optimal Parameters with the Grey Wolf Algorithm

import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.keras.layers import Flatten, Dense
from tensorflow.keras.models import Sequential
import matplotlib.pyplot as plt
from tcn import TCN

# Disable TensorFlow 2.x eager execution
tf.disable_eager_execution()

# Objective function: build and train a TCN model with the given
# hyperparameters and return the best validation loss
def objective_function(params):
    dense_1, dense_2, filters1 = params
    dense_1, dense_2, filters1 = int(dense_1), int(dense_2), int(filters1)

    model = Sequential()
    model.add(TCN(nb_filters=filters1, kernel_size=6, activation='relu',
                  input_shape=(win_size, 1), dilations=[1, 2, 4, 8, 16]))
    model.add(Flatten())
    model.add(Dense(dense_1, activation='relu'))
    model.add(Dense(dense_2, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))

    model.compile(optimizer='adam', loss='mse')
    history = model.fit(X_train, y_train, epochs=10, batch_size=32,
                        validation_data=(X_test, y_test), verbose=0)
    val_loss = min(history.history['val_loss'])

    return val_loss

# GWO parameters
dim = 3         # number of hyperparameters
num_wolves = 5  # population size
max_iter = 20   # maximum number of iterations
lower_bound = [32, 64, 32]     # lower bounds of the hyperparameters
upper_bound = [128, 256, 128]  # upper bounds of the hyperparameters

# Initialize the wolf population within the bounds
wolves = np.random.uniform(lower_bound, upper_bound, (num_wolves, dim))

# Initialize the alpha, beta, and delta wolves
alpha_pos = np.zeros(dim)
beta_pos = np.zeros(dim)
delta_pos = np.zeros(dim)
alpha_score = float("inf")
beta_score = float("inf")
delta_score = float("inf")

# Main iteration loop
a = 2  # control parameter, decreases over the iterations
convergence_curve = []

for t in range(max_iter):
    # Evaluate fitness and update the three leaders
    for i in range(num_wolves):
        fitness = objective_function(wolves[i])

        if fitness < alpha_score:
            alpha_score = fitness
            alpha_pos = wolves[i].copy()
        elif fitness < beta_score:
            beta_score = fitness
            beta_pos = wolves[i].copy()
        elif fitness < delta_score:
            delta_score = fitness
            delta_pos = wolves[i].copy()

    a = 2 - t * (2 / max_iter)  # linearly decrease a from 2 to 0

    # Move every wolf toward the average of the leaders' proposals
    for i in range(num_wolves):
        for j in range(dim):
            r1, r2 = np.random.rand(), np.random.rand()
            A1 = 2 * a * r1 - a
            C1 = 2 * r2
            D_alpha = abs(C1 * alpha_pos[j] - wolves[i][j])
            X1 = alpha_pos[j] - A1 * D_alpha

            r1, r2 = np.random.rand(), np.random.rand()
            A2 = 2 * a * r1 - a
            C2 = 2 * r2
            D_beta = abs(C2 * beta_pos[j] - wolves[i][j])
            X2 = beta_pos[j] - A2 * D_beta

            r1, r2 = np.random.rand(), np.random.rand()
            A3 = 2 * a * r1 - a
            C3 = 2 * r2
            D_delta = abs(C3 * delta_pos[j] - wolves[i][j])
            X3 = delta_pos[j] - A3 * D_delta

            # Keep the new position inside the hyperparameter bounds
            wolves[i][j] = np.clip((X1 + X2 + X3) / 3, lower_bound[j], upper_bound[j])

    convergence_curve.append(alpha_score)

print(f"Best solution: {alpha_pos}")
print(f"Best value: {alpha_score}")

plt.plot(convergence_curve)
plt.title("Convergence Curve")
plt.xlabel("Iteration")
plt.ylabel("Validation Loss")
plt.show()

Here the objective function objective_function(params) is responsible for building and training the model and returning the minimum loss on the validation set (the validation loss). Its input params is a list containing the hyperparameters. The GWO is initialized as follows:

dim = 3         # number of hyperparameters
num_wolves = 5  # population size
max_iter = 20   # maximum number of iterations
lower_bound = [32, 64, 32]     # lower bounds of the hyperparameters
upper_bound = [128, 256, 128]  # upper bounds of the hyperparameters

These values are deliberately small. In practice they should be scaled to the complexity of the task: a larger population and more iterations usually yield better solutions, though at a higher computational cost. The point here is only to demonstrate hyperparameter search with the grey wolf algorithm. The run outputs the best parameters found within the current search range and iteration budget; next, we train the model with those parameters.

4.3 Training the Model with the Optimal Parameters

# Retrain the model with the best hyperparameters found by the GWO
dense_1, dense_2, filters1 = map(int, alpha_pos)

model = Sequential()
model.add(TCN(nb_filters=filters1, kernel_size=6, activation='relu',
              input_shape=(win_size, 1), dilations=[1, 2, 4, 8, 16]))
model.add(Flatten())
model.add(Dense(dense_1, activation='relu'))
model.add(Dense(dense_2, activation='relu'))
model.add(Dense(1, activation='sigmoid'))

model.compile(optimizer='adam', loss='mse')

history = model.fit(X_train, y_train, epochs=100, batch_size=32,
                    validation_data=(X_test, y_test), verbose=0)

# Plot training and validation loss
plt.plot(history.history['loss'], label='Training Loss')
plt.plot(history.history['val_loss'], label='Validation Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
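
A natural follow-up, not shown in the original, is to evaluate the tuned model on the test set in the original unit. A minimal sketch, reusing the inverse_scale helper sketched in Section 4.1 along with X_test and y_test:

# Predict on the test set and map predictions and targets back to kW
# (inverting the min-max scaling from Section 4.1)
y_pred = model.predict(X_test).ravel()
y_pred_kw = inverse_scale(y_pred)
y_test_kw = inverse_scale(y_test)

# Report the root-mean-square error in the original unit
rmse = np.sqrt(np.mean((y_pred_kw - y_test_kw) ** 2))
print(f"Test RMSE: {rmse:.3f} kW")

# Compare predictions with the ground truth
plt.plot(y_test_kw, label='Actual')
plt.plot(y_pred_kw, label='Predicted')
plt.xlabel('Test sample')
plt.ylabel('Total active power (kW)')
plt.legend()
plt.show()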

This article is reposted from the WeChat public account @Python機(jī)器學(xué)習(xí)AI.
