sklearn数据切分及交叉验证笔记

数据切分

方法1 随机切分

# Method 1: single random train/test split.
from sklearn import datasets  # fix: `datasets` was used below but never imported in this snippet
from sklearn.model_selection import train_test_split

iris = datasets.load_iris()
# test_size=0.4 -> 40% of the samples form the test set;
# random_state=0 makes the split reproducible across runs.
X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size=0.4, random_state=0)

方法2 K折(KFold):每轮学习时使用其中 k-1 个折叠的数据作为训练集(train),剩下的一个折叠作为测试集(test),如此轮流进行 k 次

# Method 2: K-fold cross-validation split (no shuffling, fixed fold order).
from sklearn import datasets
from sklearn.model_selection import KFold

diabetes = datasets.load_diabetes()
# Use only the first 150 samples so the three folds are of equal size.
X = diabetes.data[:150]
y = diabetes.target[:150]

splitter = KFold(n_splits=3, random_state=None, shuffle=False)
# Each iteration yields index arrays for the train and test folds.
for fold_no, (train_idx, test_idx) in enumerate(splitter.split(X, y)):
    print("%s %s %s" % (fold_no, len(train_idx), len(test_idx)))  # X[train_idx], X[test_idx], y[train_idx], y[test_idx]

RepeatedKFold:将 K-Fold 交叉验证重复 n 次,每次重复可使用不同的随机划分

# RepeatedKFold runs K-Fold n_repeats times, with a different randomization
# of the data in each repetition.
# NOTE(review): `random_state` is not defined anywhere in these notes — it
# must be bound to an int (or None) before this snippet can be uncommented
# and run.
# from sklearn.model_selection import RepeatedKFold
# rk_fold = RepeatedKFold(n_splits=2, n_repeats=2, random_state=random_state)
# for k, (train, test) in enumerate(rk_fold.split(X, y)):
#     print("%s %s %s" % (k,len(train), len(test)))

方法3 分层k折 StratifiedKFold 每个小集合中各个类别的样例比例大致和完整数据集中相同

# Method 3: stratified K-fold — each fold keeps roughly the same class
# proportions as the full dataset.
import numpy as np  # fix: `np` was used below but never imported in this file
from sklearn import datasets  # fix: `datasets` was not in scope in this snippet
from sklearn.model_selection import StratifiedKFold

iris = datasets.load_iris()
skf = StratifiedKFold(n_splits=3)
X = iris.data
y = iris.target
for train, test in skf.split(X, y):
    # Count how many class-0 samples land in the train vs. test fold.
    # fix: original format string had three "%s" placeholders but only two
    # arguments, which raises "TypeError: not enough arguments" at runtime.
    print("%s %s" % (len(np.where(y[train] == 0)[0]), len(np.where(y[test] == 0)[0])))

交叉验证

# Cross-validation with cross_val_score (relies on `iris` loaded above).
from sklearn import svm
from sklearn.model_selection import cross_val_score

model = svm.SVC(kernel='linear', C=1)

# Default: 5-fold cross-validation accuracy.
cv_scores = cross_val_score(model, iris.data, iris.target, cv=5)
# Report the mean score and a 95% confidence interval (+/- 2 std).
print("Accuracy: %0.2f (+/- %0.2f)" % (cv_scores.mean(), cv_scores.std() * 2))

# Use a different scoring metric.
cv_scores = cross_val_score(model, iris.data, iris.target, cv=5, scoring='f1_macro')

# Use a custom CV splitting strategy instead of the integer fold count.
from sklearn.model_selection import StratifiedKFold
strat_cv = StratifiedKFold(n_splits=3)
cv_scores = cross_val_score(model, iris.data, iris.target, cv=strat_cv)

版权声明:本文为q383700092原创文章,遵循CC 4.0 BY-SA版权协议,转载请附上原文出处链接和本声明。