靓靓 / 点头人工智能课程-v6.0-通识 · Commits · deb2e841

Commit deb2e841, authored Jul 03, 2025 by 前钰 (parent 8d3ee023)

Update AI_Math.md

Showing 1 changed file with 49 additions and 60 deletions:
3-Python编程基础/3.4-AI数学基础与应用/AI_Math.md
# Mathematical Foundations in AI and Their Code Mappings
...
**Code example: expressing a function's slope with a derivative**
```python
import numpy as np  # import the numpy library for numerical computation

def f(x):
    return x**2 + 3*x + 2  # define a simple function f(x) = x² + 3x + 2

def numerical_derivative(f, x, eps=1e-6):
    return (f(x + eps) - f(x - eps)) / (2 * eps)  # central-difference numerical derivative of f at x

x = 1.0  # the point at which to evaluate the derivative
print("Derivative:", numerical_derivative(f, x))  # compute and print f'(x) at x = 1.0
```
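For a quick cross-check: the analytic derivative of f is f'(x) = 2x + 3, so the printed value should be very close to f'(1.0) = 5. A minimal sketch reusing the definitions above (`f_prime` is an illustrative helper added here):

```python
def f_prime(x):
    return 2*x + 3  # analytic derivative of f(x) = x² + 3x + 2

print("Analytic:", f_prime(1.0))                   # 5.0
print("Numerical:", numerical_derivative(f, 1.0))  # ≈ 5.0, up to floating-point error
```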
...
**Code example: a simple chain-rule demonstration**
```python
# y = f(g(x)), where f(u) = u^2 and g(x) = 3x + 1; the chain rule applied by hand
x = 2.0                # given input value x = 2.0
g = 3 * x + 1          # first compute g(x) = 3x + 1, so g = 3*2 + 1 = 7
f = g ** 2             # then compute f(g) = g², so f = 7² = 49

# Chain rule: dy/dx = df/dg * dg/dx
df_dg = 2 * g          # differentiate f(g) = g²: df/dg = 2g, so df_dg = 2 * 7 = 14
dg_dx = 3              # differentiate g(x) = 3x + 1: dg/dx = 3
dy_dx = df_dg * dg_dx  # apply the chain rule: dy/dx = 14 * 3 = 42
print("dy/dx =", dy_dx)  # print the derivative value
```
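The result can be verified by expanding the composite directly: y = (3x + 1)² = 9x² + 6x + 1, so dy/dx = 18x + 6, which is 42 at x = 2. A minimal numerical check, reusing the `numerical_derivative` helper from the previous example:

```python
def y(x):
    return (3 * x + 1) ** 2  # the composite function y = f(g(x))

print("Numerical dy/dx:", numerical_derivative(y, 2.0))  # ≈ 42.0
```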
...
**Code example: optimizing a simple function with gradient descent**
```python
w = 5.0   # initial parameter w, set to 5.0
lr = 0.1  # learning rate: controls the step size of each update; larger values move faster but can oscillate

for i in range(100):      # iterate 100 times, simulating the gradient-descent training process
    grad = 2 * (w - 3)    # derivative of the loss (w - 3)^2 with respect to w: dL/dw = 2*(w - 3)
    w -= lr * grad        # gradient-descent update rule: w = w - lr * grad

print("Optimized w:", w)  # the final w should approach the target value 3
```
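Why this converges: each update maps the error w − 3 to (1 − 2·lr)(w − 3) = 0.8·(w − 3), so after 100 steps the remaining error is 2 × 0.8¹⁰⁰ ≈ 4 × 10⁻¹⁰. A one-line closed-form check of that claim:

```python
remaining = 2 * (1 - 2 * 0.1) ** 100  # initial error (5 - 3), shrunk by a factor of 0.8 per step
print("Remaining error after 100 steps:", remaining)  # ≈ 4.1e-10
```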
---
...
**Code example: matrix multiplication and forward propagation**
```python
import numpy as np  # import the numpy numerical computing library

x = np.array([[1, 2]])      # input vector (1 row x 2 columns): one sample with two features
W = np.array([[0.1, 0.2],   # weight matrix (2 x 2): each column holds one neuron's weights
              [0.3, 0.4]])
b = np.array([[0.5, 0.6]])  # bias vector (1 x 2): one bias per neuron
output = np.dot(x, W) + b   # forward pass: matrix product x·W, then add the bias b (output = x·W + b)
print("Output:", output)    # prints a 1 x 2 vector
```
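Worked out by hand, the forward pass gives output = [1·0.1 + 2·0.3 + 0.5, 1·0.2 + 2·0.4 + 0.6] = [1.2, 1.6], which is exactly what the script prints.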
---
...
**Code example: PCA feature extraction (simplified)**
```python
from sklearn.decomposition import PCA    # import the PCA class for principal component analysis (dimensionality reduction)
from sklearn.datasets import load_iris   # import the iris dataset loader

X = load_iris().data              # load the feature part of the iris dataset (shape: 150 x 4)
pca = PCA(n_components=2)         # create a PCA instance that reduces the data to 2 dimensions
X_reduced = pca.fit_transform(X)  # fit the data and transform it; the result has shape 150 x 2
print("Reduced data:", X_reduced[:5])  # print the first 5 samples after dimensionality reduction
```
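To see how much of the original variance the two components retain, the fitted PCA object exposes the standard scikit-learn attribute `explained_variance_ratio_`; a quick added check (for iris, the two components keep roughly 98% of the variance):

```python
print("Explained variance ratio:", pca.explained_variance_ratio_)  # fraction of variance captured by each component
print("Total retained:", pca.explained_variance_ratio_.sum())      # ≈ 0.98 for iris with 2 components
```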
---
...
**Code example: the naive Bayes idea in practice**
```python
from sklearn.naive_bayes import GaussianNB  # import the Gaussian naive Bayes classifier
from sklearn.datasets import load_iris      # import the iris dataset loader

X, y = load_iris(return_X_y=True)  # load the iris dataset: X holds the features, y the labels
model = GaussianNB()               # create a Gaussian naive Bayes model instance
model.fit(X, y)                    # train (fit) the model on the full dataset
print("Prediction:", model.predict([X[0]]))  # use the trained model to predict the class of the first sample
```
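Since naive Bayes is a probabilistic classifier, the posterior probabilities behind that prediction can be inspected with `predict_proba`, a standard scikit-learn method; a small added check:

```python
print("Class probabilities:", model.predict_proba([X[0]]))  # posterior P(class | features) for each of the 3 iris classes
```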
---
...
### Basic gradient descent (for fitting a model)
```python
import torch  # import PyTorch for tensor computation and automatic differentiation

# Objective function: y = (w - 2)^2; we want w to approach 2
w = torch.tensor(5.0, requires_grad=True)  # initialize the parameter w = 5.0 with autograd enabled
optimizer = torch.optim.SGD([w], lr=0.1)   # stochastic gradient descent optimizer with learning rate 0.1, optimizing only w

for i in range(100):       # run 100 optimization steps
    loss = (w - 2) ** 2    # compute the current loss, i.e. the objective value
    loss.backward()        # autograd computes the gradient of loss with respect to w
    optimizer.step()       # update w using the computed gradient
    optimizer.zero_grad()  # clear the gradient, otherwise it accumulates across iterations

print("Trained w:", w.item())  # print the optimized w, expected to be close to 2
```
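The optimizer object is a convenience; the same update can be written by hand, which makes the connection to the earlier pure-Python gradient-descent example explicit. A minimal sketch of that equivalent loop:

```python
import torch

w = torch.tensor(5.0, requires_grad=True)  # same starting point as above
lr = 0.1                                   # same learning rate

for i in range(100):
    loss = (w - 2) ** 2
    loss.backward()        # populate w.grad with dL/dw
    with torch.no_grad():  # perform the update outside the autograd graph
        w -= lr * w.grad   # the same rule as before: w = w - lr * grad
    w.grad.zero_()         # reset the accumulated gradient for the next step

print("Trained w:", w.item())  # also converges to ≈ 2
```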
### Further reading: gnn.club