Search for the optimal XGBoost model parameters
import numpy as np
from xgboost import XGBClassifier
from sklearn.model_selection import GridSearchCV
# Toy data: 10 samples, 8 features, balanced binary labels
X_train = np.random.randn(10, 8)
y_train = np.concatenate([np.ones(5), np.zeros(5)], axis=0)
# Define the search space for each hyperparameter
parameters = {'learning_rate': [0.1, 0.2, 0.3, 0.4],
              'subsample': [0.6, 0.7, 0.8, 0.9, 1.0],
              'colsample_bytree': [0.7, 0.8, 0.9, 1.0],
              'max_depth': [1, 2, 3, 5, 8],
              'n_estimators': [100, 200, 300, 500, 700]}
# The n_estimators set here is overridden by the values in the grid above
model = XGBClassifier(n_estimators=200)
clf = GridSearchCV(model, parameters, cv=3, scoring='roc_auc', verbose=1, n_jobs=-1)
clf.fit(X_train, y_train)
# Best parameters found by the grid search
print(clf.best_params_)
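
Because GridSearchCV refits the best parameter combination on the full training set by default (refit=True), the fitted searcher itself, or its best_estimator_ attribute, can be used directly for prediction. A minimal sketch, continuing from the variables above:

# Mean cross-validated AUC of the best parameter combination
print(clf.best_score_)
# The model refitted with the best parameters on all of X_train
best_model = clf.best_estimator_
print(best_model.predict_proba(X_train))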
Train and predict with XGBoost
import numpy as np
from xgboost import XGBClassifier
X_train = np.random.randn(10, 8)
y_train = np.concatenate([np.ones(5), np.zeros(5)], axis=0)
X_test = np.random.randn(6, 8)
y_test = np.concatenate([np.ones(3), np.zeros(3)], axis=0)
# For xgboost >= 1.6, eval_metric and early_stopping_rounds are constructor
# arguments (passing them to fit() was removed in 2.0), and use_label_encoder
# is no longer needed
clf = XGBClassifier(objective='binary:logistic', colsample_bytree=0.8, learning_rate=0.2,
                    max_depth=4, subsample=0.9, n_estimators=300,
                    eval_metric="auc", early_stopping_rounds=50)
clf.fit(X_train, y_train, eval_set=[(X_test, y_test)], verbose=True)
train_predict = clf.predict(X_train)
yp = clf.predict_proba(X_test)  # shape [6, 2]: column 0 is the probability of class 0, column 1 the probability of class 1
print(yp)
print(clf.feature_importances_)  # print the per-feature importance scores
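
To quantify how well the predicted probabilities rank the held-out labels, the class-1 column of predict_proba can be passed to sklearn's roc_auc_score. A minimal sketch, continuing from the variables above:

from sklearn.metrics import roc_auc_score

# AUC on the test set, computed from the predicted probability of class 1
test_auc = roc_auc_score(y_test, yp[:, 1])
print(test_auc)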