Cross-Entropy Loss
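Cross-entropy measures the dissimilarity between a predicted probability distribution and the true distribution. For N samples with one-hot targets t and predicted class probabilities p over C classes, the batch-averaged loss is:

CE = -\frac{1}{N} \sum_{i=1}^{N} \sum_{c=1}^{C} t_{i,c} \log p_{i,c}

Because the targets are one-hot, only the predicted probability of the correct class contributes to each sample's term.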
import numpy as np

def softmax(X):
    # Shift by the row-wise max for numerical stability, then
    # normalize so that each row sums to 1.
    exps = np.exp(X - np.max(X, axis=-1, keepdims=True))
    return exps / np.sum(exps, axis=-1, keepdims=True)
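A quick sanity check (the logits here are made-up values, not taken from the example below): softmax maps arbitrary real-valued scores to a valid per-row probability distribution.

logits = np.array([[2.0, 1.0, 0.1]])  # hypothetical raw model outputs
probs = softmax(logits)
probs.sum(axis=-1)
# array([1.])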
def cross_entropy(predictions, targets):
    # Mean negative log-likelihood over the batch, assuming one-hot targets.
    # Clip predictions away from 0 so that log(0) never occurs.
    N = predictions.shape[0]
    predictions = np.clip(predictions, 1e-12, 1.0)
    ce = -np.sum(targets * np.log(predictions)) / N
    return ce
predictions = np.array([[0.25, 0.25, 0.25, 0.25], [0.01, 0.01, 0.01, 0.97]]) # (N, num_classes)
targets = np.array([[1, 0, 0, 0], [0, 0, 0, 1]]) # (N, num_classes)
cross_entropy(predictions, targets)
# 0.7083767843022996
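Only the probability assigned to the true class contributes: -(ln 0.25 + ln 0.97) / 2 ≈ (1.3863 + 0.0305) / 2 ≈ 0.7084, matching the output above.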
# scikit-learn's log_loss computes the same quantity:
from sklearn.metrics import log_loss

log_loss(targets, predictions)
# 0.7083767843022996
log_loss(targets, predictions) == cross_entropy(predictions, targets)
# True
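The exact equality above happens to hold for these inputs, but two floating-point implementations can disagree in the last few bits, so a tolerance-based comparison is the safer idiom:

np.isclose(log_loss(targets, predictions), cross_entropy(predictions, targets))
# True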