```
% Data preparation
data = readtable('Attrition_train.csv')
n = size(data, 1);
train_ratio = 0.8;
train_size = floor(n * train_ratio);
train_indices = randsample(n, train_size);
test_indices = setdiff(1:n, train_indices);
train_data = data(train_indices, :);
train_labels = data(train_indices, end);
test_data = data(test_indices, :);
test_labels = data(test_indices, end);
% Model training
k = 5;
model = fitcknn(train_data(:, 1:end-1), train_labels, 'NumNeighbors', k);
% Model test
predictions = predict(model, test_data(:, 1:end-1));
accuracy = sum(predictions == test_labels) / length(test_labels);
disp(['Accuracy: ', num2str(accuracy)]);
```
How can this code be optimized?
Consider wrapping the code in a function, which improves readability and reusability. You can also use cross-validation to choose the best value of k, and MATLAB's built-in cvpartition function to split the dataset (a hold-out split is used in the function below; a sketch of selecting k by cross-validation follows the usage example). The code is as follows:
```
function [accuracy, model] = knn_classification(data, k)
% data: dataset table, with the class label in the last column
% k: number of nearest neighbors
% Hold-out split of the dataset (80% training / 20% test)
cv = cvpartition(size(data, 1), 'HoldOut', 0.2);
train_data = data(training(cv), :);
test_data = data(test(cv), :);
% Extract labels as categorical so predictions can be compared with ==
train_labels = categorical(train_data{:, end});
test_labels = categorical(test_data{:, end});
% Model training
model = fitcknn(train_data(:, 1:end-1), train_labels, 'NumNeighbors', k);
% Model testing
predictions = predict(model, test_data(:, 1:end-1));
accuracy = sum(predictions == test_labels) / length(test_labels);
disp(['Accuracy: ', num2str(accuracy)]);
end
```
Usage:
```
data = readtable('Attrition_train.csv');
[accuracy, model] = knn_classification(data, 5);
```
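To actually select k by cross-validation, as suggested above, one option is to compare the k-fold misclassification loss over a range of candidate values. The following is a minimal sketch, assuming the predictor columns are numeric and using an arbitrarily chosen candidate range of 1:2:15:
```
% Minimal sketch: pick k by 5-fold cross-validated loss
data = readtable('Attrition_train.csv');
X = data(:, 1:end-1);                 % predictor table (assumed numeric columns)
y = categorical(data{:, end});        % class labels
ks = 1:2:15;                          % candidate k values (assumption)
cv_loss = zeros(size(ks));
for i = 1:numel(ks)
    mdl = fitcknn(X, y, 'NumNeighbors', ks(i), 'CrossVal', 'on', 'KFold', 5);
    cv_loss(i) = kfoldLoss(mdl);      % mean misclassification rate over the folds
end
[~, best] = min(cv_loss);
best_k = ks(best);
[accuracy, model] = knn_classification(data, best_k);
```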