theta = np.random.randn(2,1)
时间: 2024-02-04 07:07:39 浏览: 154
根据提供的引用内容,theta = np.random.randn(2,1)是一个使用NumPy库生成一个2行1列的随机数组的Python代码。其中,np.random.randn()函数返回一个或多个样本,具有标准正态分布。这意味着生成的随机数遵循正态分布,均值为0,标准差为1。
下面是一个示例代码,展示如何使用theta = np.random.randn(2,1)生成随机数组:
```python
import numpy as np

# Sample a 2x1 array from the standard normal distribution (mean 0, std 1).
theta = np.random.randn(2, 1)
print(theta)
```
输出:
```
[[-0.12345678]
[ 1.23456789]]
```
相关问题
# NOTE(review): this snippet was pasted as a single line; structure restored below.
# It assumes `df` (a DataFrame) and `scaler` are defined earlier in the script.
import matplotlib.pyplot as plt
from scipy.stats import norm

X = df.iloc[:, 4:]  # feature columns
X = scaler.fit_transform(X)
y_1 = df[['U(Ⅳ)浓度']]  # target variable 1
y_2 = df[['U(Ⅵ)浓度']]  # target variable 2

# Add the bias term x0 = 1 to X.
X_b = np.c_[np.ones((X.shape[0], 1)), X]

# Learning rate, iteration count, and initial parameters.
eta = 0.1
n_iterations = 1000
# NOTE(review): theta has 2 rows, which only matches X_b when X has a single
# feature column — verify against the actual width of X_b.
theta = np.random.randn(2, 1)

def likelihood(theta, X, y):
    """Likelihood of y under a Gaussian noise model with unit std."""
    m = y.size
    h = X.dot(theta).flatten()
    mu = h
    sigma = 1  # assume the Gaussian noise has standard deviation 1
    p = norm(mu, sigma).pdf(y.flatten())
    L = np.prod(p)
    return L

def loss(theta, X, y):
    """Per-sample negative log-likelihood under the same Gaussian model."""
    m = y.size
    h = X.dot(theta).flatten()
    mu = h
    sigma = 1  # assume the Gaussian noise has standard deviation 1
    p = norm(mu, sigma).pdf(y.flatten())
    J = -np.sum(np.log(p))
    return J/m

# Batch gradient descent.
# NOTE(review): `y` is never assigned above — the author presumably means
# y_1 or y_2; confirm before running.
for iteration in range(n_iterations):
    gradients = 2/X_b.shape[0] * X_b.T.dot(X_b.dot(theta) - y)
    theta = theta - eta * gradients
    if iteration % 100 == 0:
        print(f"Iteration {iteration}: theta={theta.flatten()}, likelihood={likelihood(theta, X_b, y)}, loss={loss(theta, X_b, y)}")

# Plot the data and the fitted regression line.
plt.plot(X, y, "b.")
X_new = np.array([[0], [10]])
X_new_b = np.c_[np.ones((2, 1)), X_new]
y_predict = X_new_b.dot(theta)
plt.plot(X_new, y_predict, "r-", linewidth=2, label="Predictions")
plt.xlabel("X")
plt.ylabel("y")
plt.legend()
plt.show()
这段代码是一个简单的线性回归模型,使用了批量梯度下降法来拟合数据。具体来说,它首先进行了特征数据的标准化处理,然后定义了似然函数和损失函数来计算梯度,最后使用批量梯度下降法来更新参数并拟合数据。在训练过程中,它每迭代100次就会输出当前参数、似然函数和损失函数的值。最后,它将拟合出的回归直线和原始数据一起绘制在图上,以便进行可视化比较。
# NOTE(review): the pasted snippet lost its line breaks and `*` operators
# (markdown formatting stripped them, e.g. `j3.12`, `rnp.sin(t)`,
# `range(Nj,N(j+1))`); restored below.
import matplotlib.pyplot as plt
import numpy as np
import sklearn
import sklearn.datasets
import sklearn.linear_model

def plot_decision_boundary(model, X, y):
    """Plot model's decision regions over the 2-D points in X (shape (2, m))."""
    # Set min and max values and give it some padding
    x_min, x_max = X[0, :].min() - 1, X[0, :].max() + 1
    y_min, y_max = X[1, :].min() - 1, X[1, :].max() + 1
    h = 0.01
    # Generate a grid of points with distance h between them
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    # Predict the function value for the whole grid
    Z = model(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    # Plot the contour and training examples
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.ylabel('x2')
    plt.xlabel('x1')
    plt.scatter(X[0, :], X[1, :], c=y, cmap=plt.cm.Spectral)

def sigmoid(x):
    """Logistic function; works elementwise on arrays."""
    s = 1/(1+np.exp(-x))
    return s

def load_planar_dataset():
    """Generate the two-class 'flower' dataset: X (2, 400), Y (1, 400)."""
    np.random.seed(1)
    m = 400  # number of examples
    N = int(m/2)  # number of points per class
    print(np.random.randn(N))  # NOTE(review): debug print; also advances the RNG state
    D = 2  # dimensionality
    X = np.zeros((m, D))  # data matrix where each row is a single example
    Y = np.zeros((m, 1), dtype='uint8')  # labels vector (0 for red, 1 for blue)
    a = 4  # maximum ray of the flower
    for j in range(2):
        ix = range(N*j, N*(j+1))
        t = np.linspace(j*3.12, (j+1)*3.12, N) + np.random.randn(N)*0.2  # theta
        r = a*np.sin(4*t) + np.random.randn(N)*0.2  # radius
        X[ix] = np.c_[r*np.sin(t), r*np.cos(t)]
        Y[ix] = j
    X = X.T
    Y = Y.T
    return X, Y

def load_extra_datasets():
    """Return five toy datasets: circles, moons, blobs, quantiles, and noise."""
    N = 200
    noisy_circles = sklearn.datasets.make_circles(n_samples=N, factor=.5, noise=.3)
    noisy_moons = sklearn.datasets.make_moons(n_samples=N, noise=.2)
    blobs = sklearn.datasets.make_blobs(n_samples=N, random_state=5, n_features=2, centers=6)
    gaussian_quantiles = sklearn.datasets.make_gaussian_quantiles(mean=None, cov=0.5, n_samples=N, n_features=2, n_classes=2, shuffle=True, random_state=None)
    no_structure = np.random.rand(N, 2), np.random.rand(N, 2)
    return noisy_circles, noisy_moons, blobs, gaussian_quantiles, no_structure
这段程序是一个分类模型的辅助函数,包括了绘制决策边界、sigmoid函数和加载数据集的函数。具体实现如下:
```python
import matplotlib.pyplot as plt
import numpy as np
import sklearn
import sklearn.datasets
import sklearn.linear_model
def plot_decision_boundary(model, X, y):
    """Draw the classifier's decision regions with the training points on top.

    model -- callable mapping an (k, 2) array of points to predicted labels
    X     -- (2, m) array of training inputs (row 0 = x1, row 1 = x2)
    y     -- labels used to color the scatter plot
    """
    pad = 1
    step = 0.01
    # Grid bounds padded around the data range.
    xs = np.arange(X[0, :].min() - pad, X[0, :].max() + pad, step)
    ys = np.arange(X[1, :].min() - pad, X[1, :].max() + pad, step)
    xx, yy = np.meshgrid(xs, ys)
    # Evaluate the model on every grid point, then restore the grid shape.
    grid_points = np.c_[xx.ravel(), yy.ravel()]
    Z = model(grid_points).reshape(xx.shape)
    # Filled contour for the decision regions, scatter for the samples.
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.ylabel('x2')
    plt.xlabel('x1')
    plt.scatter(X[0, :], X[1, :], c=y, cmap=plt.cm.Spectral)
def sigmoid(x):
    """Logistic function 1 / (1 + e^(-x)); applies elementwise to arrays."""
    return 1.0 / (1.0 + np.exp(-x))
def load_planar_dataset():
    """Build the two-class 'flower' toy dataset.

    Returns:
        X -- float array of shape (2, 400), point coordinates
        Y -- uint8 array of shape (1, 400), labels 0 or 1
    """
    np.random.seed(1)
    points_per_class = 200
    total = 2 * points_per_class
    dims = 2
    max_radius = 4  # maximum petal radius of the flower
    X = np.zeros((total, dims))
    Y = np.zeros((total, 1), dtype='uint8')
    for label in range(2):
        idx = range(points_per_class * label, points_per_class * (label + 1))
        # Angle and radius each take one randn draw per class, keeping the
        # RNG stream identical to the reference implementation.
        angle = (np.linspace(label * 3.12, (label + 1) * 3.12, points_per_class)
                 + 0.2 * np.random.randn(points_per_class))
        radius = (max_radius * np.sin(4 * angle)
                  + 0.2 * np.random.randn(points_per_class))
        X[idx] = np.c_[radius * np.sin(angle), radius * np.cos(angle)]
        Y[idx] = label
    return X.T, Y.T
def load_extra_datasets():
    """Fetch five alternative toy datasets for experimenting with classifiers."""
    n_samples = 200
    circles = sklearn.datasets.make_circles(n_samples=n_samples, factor=.5, noise=.3)
    moons = sklearn.datasets.make_moons(n_samples=n_samples, noise=.2)
    blob_data = sklearn.datasets.make_blobs(n_samples=n_samples, random_state=5,
                                            n_features=2, centers=6)
    quantiles = sklearn.datasets.make_gaussian_quantiles(
        mean=None, cov=0.5, n_samples=n_samples, n_features=2,
        n_classes=2, shuffle=True, random_state=None)
    # Pure noise: two independent uniform samples with no class structure.
    unstructured = np.random.rand(n_samples, 2), np.random.rand(n_samples, 2)
    return circles, moons, blob_data, quantiles, unstructured
```
这段程序中包含了以下函数:
- `plot_decision_boundary(model, X, y)`:绘制分类模型的决策边界,其中`model`是分类模型,`X`是特征矩阵,`y`是标签向量。
- `sigmoid(x)`:实现sigmoid函数。
- `load_planar_dataset()`:加载一个二维的花瓣数据集。
- `load_extra_datasets()`:加载五个其他数据集。
阅读全文