해결된 질문
작성
·
1K
0
선생님!! 4회 기출 유형(작업형2)에서 랜덤포레스트와 lgb 는 이상이 없는데, xgb 로 모델링 할 경우에만 에러가 발생하는데, 무슨 문제인가요?
[코딩]
import pandas as pd
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
# print(train.shape, test.shape)
# print(train.head(3))
# print(test.head(3))
# print(train.info())
# print(train.describe())
# print(train.describe(include='object'))
# print(test.describe(include='object'))
# print(train['Segmentation'].value_counts())
# print(train.isnull().sum())
# print(test.isnull().sum())
train = train.drop('ID', axis=1)
test_id = test.pop('ID')
test.head(3)
cols = train.select_dtypes(include='object').columns
print(cols)
from sklearn.preprocessing import LabelEncoder
for col in cols :
le = LabelEncoder()
train[col] = le.fit_transform(train[col])
test[col] = le.transform(test[col])
train.head(3)
from sklearn.model_selection import train_test_split
X_tr, X_val, y_tr, y_val = train_test_split(train.drop('Segmentation', axis=1), train['Segmentation'], test_size=0.1, random_state=2022)
print(X_tr.shape, X_val.shape, y_tr.shape, y_val.shape)
from sklearn.metrics import f1_score
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(random_state=2022, max_depth=7, n_estimators=100)
rf.fit(X_tr, y_tr)
pred = rf.predict(X_val)
pred[:10]
f1_score(y_val, pred, average='macro')
from sklearn.metrics import f1_score
# import lightgbm as lgb
# model = lgb.LGBMClassifier(random_state=2022, max_depth=5, n_estimators=800, learning_rate=0.01 )
# model.fit(X_tr, y_tr)
# pred = model.predict(X_val)
# pred[:10]
# f1_score(y_val, pred, average='macro')
from sklearn.metrics import f1_score
from xgboost import XGBClassifier
xgb = XGBClassifier(random_state=2022)
xgb.fit(X_tr, y_tr)
pred = xgb.predict(X_val)
pred[:10]
[에러내용]
ValueError Traceback (most recent call last)
<ipython-input-57-d656863c7bc3> in <cell line: 4>()
2 from xgboost import XGBClassifier
3 xgb = XGBClassifier(random_state=2022)
----> 4 xgb.fit(X_tr, y_tr)
5 pred = xgb.predict(X_val)
6 pred[:10]
1 frames
/usr/local/lib/python3.10/dist-packages/xgboost/sklearn.py in fit(self, X, y, sample_weight, base_margin, eval_set, eval_metric, early_stopping_rounds, verbose, xgb_model, sample_weight_eval_set, base_margin_eval_set, feature_weights, callbacks)
1438 or not (self.classes_ == expected_classes).all()
1439 ):
-> 1440 raise ValueError(
1441 f"Invalid classes inferred from unique values of y
. "
1442 f"Expected: {expected_classes}, got {self.classes_}"
ValueError: Invalid classes inferred from unique values of y
. Expected: [0 1 2 3], got [1 2 3 4]
답변 1
0
우선 xgboost가 너무 예민해요~ 그래서 xgb대신 lightgbm를 추천해요!
y 종류(클래스)가 1234인데
xgb는 0부터 구성되어야 해요. 그러니깐 클래스가 0,1,2,3이 되어야 합니다.
예!
lgb를 사용해야 겠네요. ㄱ마사합니다.