YAN / 点头人工智能课程-v6.0-通识 / Commits
Commit 8d3ee023 authored Jul 03, 2025 by 前钰
parent 224807a2
Showing 1 changed file with 324 additions and 0 deletions
3-Python编程基础/3.4-AI数学基础与应用/math.ipynb  0 → 100644  +324 -0
{
"cells": [
{
"cell_type": "code",
"execution_count": 15,
"id": "f72cecef",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"导数值: 4.999999999810711\n"
]
}
],
"source": [
"import numpy as np # 导入numpy库,主要用于科学计算\n",
"\n",
"# 定义一个函数f(x),计算表达式 x^2 + 3x + 2 的值\n",
"def f(x):\n",
" return x**2 + 3*x + 2\n",
"\n",
"# 定义一个数值导数函数 numerical_derivative\n",
"# 输入:\n",
"# f :目标函数\n",
"# x :求导点\n",
"# eps :一个非常小的数,用来计算导数的差分间隔,默认是1e-6\n",
"def numerical_derivative(f, x, eps=1e-6):\n",
" # 利用中心差分公式近似导数:\n",
" # f'(x) ≈ [f(x + eps) - f(x - eps)] / (2 * eps)\n",
" return (f(x + eps) - f(x - eps)) / (2 * eps)\n",
"\n",
"x = 1.0 # 设定求导点x=1.0\n",
"\n",
"# 打印在x=1.0处的导数近似值 \n",
"print(\"导数值:\", numerical_derivative(f, x))\n"
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "8449ad63",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"dy/dx = 42.0\n"
]
}
],
"source": [
"# 复合函数的例子:y = f(g(x))\n",
"# 其中 f(x) = x^2,g(x) = 3x + 1\n",
"x = 2.0 # 给定自变量x的值\n",
"\n",
"g = 3 * x + 1 # 计算内部函数g(x)的值,g(2) = 3*2 + 1 = 7\n",
"f = g**2 # 计算外部函数f(g)的值,f(7) = 7^2 = 49\n",
"\n",
"# 链式法则求导:\n",
"# dy/dx = df/dg * dg/dx\n",
"# df/dg 是 f 对 g 的导数,f(g) = g^2,因此 df/dg = 2g\n",
"df_dg = 2 * g # 计算 df/dg,此处为 2 * 7 = 14\n",
"\n",
"# dg/dx 是 g 对 x 的导数,g(x) = 3x + 1,导数为3\n",
"dg_dx = 3 # 计算 dg/dx,值为3\n",
"\n",
"# 计算 dy/dx = df/dg * dg/dx = 14 * 3 = 42\n",
"dy_dx = df_dg * dg_dx\n",
"\n",
"print(\"dy/dx =\", dy_dx) # 输出导数值\n"
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "02622489",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"优化后的 w: 3.0000000353369414\n"
]
}
],
"source": [
"# 初始化参数 w,目标是通过优化让 w 接近 3\n",
"w = 5.0 # 初始参数值为 5,距离目标值 3 有一定的偏差\n",
"\n",
"# 设置学习率(learning rate),控制每次参数更新的步长\n",
"lr = 0.1 # 学习率是 0.1,不能太大也不能太小\n",
"\n",
"# 梯度下降的迭代过程\n",
"for i in range(80): # 执行 100 次迭代更新\n",
" grad = 2 * (w - 3) # 计算损失函数 (w - 3)^2 的梯度,公式推导如下:\n",
" # 假设损失函数 f(w) = (w - 3)^2\n",
" # 则 f'(w) = 2*(w - 3)\n",
" \n",
" w -= lr * grad # 使用梯度下降法更新参数 w\n",
" # w = w - 学习率 × 梯度\n",
" # 目的是逐步减小损失函数的值,向最小值(最优 w=3)靠近\n",
"\n",
"# 输出最终优化后的参数 w,应该非常接近 3\n",
"print(\"优化后的 w:\", w)\n"
]
},
{
"cell_type": "code",
"execution_count": 21,
"id": "7988f5f1",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"输出结果: [[1.2 1.6]]\n"
]
}
],
"source": [
"import numpy as np # 导入NumPy库,用于处理数组和矩阵运算\n",
"\n",
"x = np.array([[1, 2]]) # 定义输入数据x,是一个形状为(1, 2)的二维数组,表示有两个特征\n",
"\n",
"W = np.array([[0.1, 0.2], # 定义权重矩阵W,形状为(2, 2),表示有2个输入和2个输出神经元\n",
" [0.3, 0.4]])\n",
"\n",
"b = np.array([[0.5, 0.6]]) # 定义偏置项b,形状为(1, 2),每个输出神经元对应一个偏置值\n",
"\n",
"output = np.dot(x, W) + b # 先进行矩阵乘法x·W(形状变为1x2),再加上偏置b(逐元素加法)\n",
" # 即 output = xW + b,表示一层神经网络的线性变换\n",
"\n",
"print(\"输出结果:\", output) # 打印输出结果,形如 [[1.2 1.6]]\n"
]
},
{
"cell_type": "code",
"execution_count": 22,
"id": "f345a811",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[[1 2]]\n",
"[[0.1 0.2]\n",
" [0.3 0.4]]\n"
]
}
],
"source": [
"print(x)\n",
"print(W)"
]
},
{
"cell_type": "code",
"execution_count": 23,
"id": "6972756f",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"降维后的数据: [[-2.68412563 0.31939725]\n",
" [-2.71414169 -0.17700123]\n",
" [-2.88899057 -0.14494943]\n",
" [-2.74534286 -0.31829898]\n",
" [-2.72871654 0.32675451]]\n"
]
}
],
"source": [
"from sklearn.decomposition import PCA # 从sklearn库中导入PCA类,用于主成分分析降维\n",
"from sklearn.datasets import load_iris # 导入load_iris函数,用来加载鸢尾花数据集\n",
"\n",
"X = load_iris().data # 加载鸢尾花数据集的特征数据,X是一个形状为(150, 4)的数组,表示150个样本,每个样本4个特征\n",
"\n",
"pca = PCA(n_components=2) # 创建一个PCA对象,指定降维到2个主成分(把4维数据降到2维)\n",
"\n",
"X_reduced = pca.fit_transform(X) \n",
"# 先用X训练PCA模型(fit),计算主成分方向\n",
"# 然后将原始数据X映射到这两个主成分构成的新空间中(transform)\n",
"# fit_transform是fit和transform的合并操作,输出降维后的数据,形状为(150, 2)\n",
"\n",
"print(\"降维后的数据:\", X_reduced[:5]) \n",
"# 打印降维后数据的前5条样本,每条样本现在只有两个特征(两个主成分的值)\n"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "0f8108c5",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
" sepal length (cm) sepal width (cm) petal length (cm) petal width (cm) \\\n",
"0 5.1 3.5 1.4 0.2 \n",
"1 4.9 3.0 1.4 0.2 \n",
"2 4.7 3.2 1.3 0.2 \n",
"3 4.6 3.1 1.5 0.2 \n",
"4 5.0 3.6 1.4 0.2 \n",
"\n",
" label \n",
"0 0 \n",
"1 0 \n",
"2 0 \n",
"3 0 \n",
"4 0 \n"
]
}
],
"source": [
"import pandas as pd # 导入 pandas 库,用于处理表格数据\n",
"from sklearn.datasets import load_iris # 从 sklearn 中导入加载鸢尾花数据集的函数\n",
"\n",
"iris = load_iris() # 加载鸢尾花数据集,返回一个包含特征、标签等信息的字典结构\n",
"df = pd.DataFrame(iris.data, columns=iris.feature_names) # 将特征数据转换成 DataFrame,并设置列名为特征名称\n",
"df['label'] = iris.target # 在 DataFrame 中新增一列,用于存储样本的分类标签(0,1,2)\n",
"print(df.head()) # 打印前5行数据,检查表格是否正确加载\n"
]
},
{
"cell_type": "code",
"execution_count": 28,
"id": "dc4f981f",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"预测: [2]\n"
]
}
],
"source": [
"from sklearn.naive_bayes import GaussianNB # 导入 Gaussian Naive Bayes(高斯朴素贝叶斯)分类器\n",
"from sklearn.datasets import load_iris # 导入内置的 iris(鸢尾花)数据集加载工具\n",
"\n",
"# 加载鸢尾花数据集的特征(X)和标签(y)\n",
"X, y = load_iris(return_X_y=True)\n",
"# X 是一个 shape 为 (150, 4) 的二维数组,对应 150 条数据,每条数据有 4 个特征\n",
"# y 是一个 shape 为 (150,) 的一维数组,对应每条数据的分类标签(0, 1, 2 分别表示三种花)\n",
"\n",
"# 创建一个 Gaussian Naive Bayes 模型对象\n",
"model = GaussianNB()\n",
"\n",
"# 用数据训练(拟合)模型:即学习每一类的均值、方差等参数\n",
"model.fit(X, y)\n",
"\n",
"# 用训练好的模型预测第1条样本 X[0] 的类别\n",
"print(\"预测:\", model.predict([X[100]]))\n",
"# 注意这里必须写成二维数组 [X[0]],因为模型要求输入是二维的(即 shape 是 (1, 4))\n"
]
},
{
"cell_type": "code",
"execution_count": 35,
"id": "189bbc2c",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"训练后的 w: 2.000000476837158\n"
]
}
],
"source": [
"import torch # 导入 PyTorch 库\n",
"\n",
"# 定义一个可训练的参数 w,初始值为 5.0,requires_grad=True 表示需要计算梯度\n",
"w = torch.tensor(5.0, requires_grad=True)\n",
"\n",
"# 使用随机梯度下降(SGD)优化器优化参数 w,学习率设为 0.1\n",
"optimizer = torch.optim.SGD([w], lr=0.1)\n",
"\n",
"# 训练迭代 100 次\n",
"for i in range(80):\n",
" loss = (w - 2)**2 # 定义损失函数,目标是让 w 趋近于 2\n",
" loss.backward() # 自动求导,计算损失对 w 的梯度 ∂loss/∂w\n",
" optimizer.step() # 根据梯度更新参数 w,即 w = w - lr * grad\n",
" optimizer.zero_grad() # 梯度清零,防止梯度在每次循环中累加\n",
"\n",
"# 打印训练结束后的参数值,将张量转为 Python 数值输出\n",
"print(\"训练后的 w:\", w.item())\n"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "pytorch_gpu",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.3"
}
},
"nbformat": 4,
"nbformat_minor": 5
}