def svm_loss_naive(W, X, y):
    """Compute the multiclass SVM (hinge) loss with explicit loops.

    Naive reference implementation: iterates over every sample and every
    class instead of vectorizing, so it is easy to read but slow.

    Inputs:
    - W: A numpy array of shape (p, n) containing weights
         (one row of weights per class).
    - X: A numpy array of shape (n, m) containing data; each *column*
         is one sample.
    - y: A numpy array of shape (m,) containing integer class labels,
         where 0 <= y[i] < p.

    Returns:
    - loss: the average hinge loss over all m samples (a float).
    """
    num_classes = W.shape[0]   # class weights are stored row-wise
    num_samples = X.shape[1]   # samples are stored column-wise
    loss = 0.0
    delta = 1.0  # SVM margin hyperparameter

    for i in range(num_samples):
        scores = np.dot(W, X[:, i])            # class scores for sample i
        correct_class_score = scores[y[i]]
        for j in range(num_classes):
            if j == y[i]:
                # The correct class is excluded from the hinge sum.
                continue
            margin = max(0, scores[j] - correct_class_score + delta)
            loss += margin

    # Average the accumulated loss over all samples.
    loss = loss / num_samples
    return loss
根据对这段 python 代码的理解:首先计算第 i 个样本在每个类别上的得分 scores,
然后根据数组 y 中存储的第 i 个样本的正确类别索引 y[i],
取出对应的正确类别得分 correct_class_score。
我预先理解的其余代码
发布于 2022-11-06 18:15:35
这是 SVM 损失(数据损失)函数的定义:

在内部求和中,明确排除了等于 y[i](正确类别)的索引 j。
https://stackoverflow.com/questions/66740435
复制相似问题