Performance check of sklearn's random forest

I wanted to get a feel for how well a random forest performs, so I imported RandomForestClassifier and LogisticRegression from scikit-learn and gave them a try. I'm not sure what the correct evaluation method is, but for now I decided to evaluate them with the metrics derived from the confusion matrix.
If anyone knows the right way to evaluate performance, please let me know...
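
As a note to myself about the confusion-matrix part: scikit-learn's confusion_matrix(y_true, y_pred).ravel() returns the four counts in the order tn, fp, fn, tp for binary labels, so the unpacking order matters. A minimal sketch with toy labels (the arrays below are made up purely for illustration):

from sklearn.metrics import confusion_matrix

# Toy binary labels, purely illustrative
y_true = [0, 0, 1, 1, 1]
y_pred = [0, 1, 1, 1, 0]

# ravel() flattens the 2x2 matrix row by row: tn, fp, fn, tp
tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
print(tn, fp, fn, tp)  # -> 1 1 1 2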

# Random forest
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import confusion_matrix

# X, Y data (df.example is a placeholder for the actual feature and label columns)
X = df.example
Y = df.example

train_X, test_X, train_Y, test_Y = train_test_split(X, Y, random_state = 0)
p_score_array_testdata = []
p_score_array_traindata = []
for i in range(300):
    train_X, test_X, train_Y, test_Y = train_test_split(X, Y, random_state = i)
    rf = RandomForestClassifier(n_estimators=30)
    rf.fit(train_X, train_Y)
    pred_X = rf.predict(test_X)
    pred_X_t = rf.predict(train_X)
    accuracy_s = accuracy_score(test_Y, pred_X)
    precision_s = precision_score(test_Y, pred_X)
    recall_s = recall_score(test_Y, pred_X)
    f1_s = f1_score(test_Y, pred_X)
    accuracy_s_t = accuracy_score(train_Y, pred_X_t)
    precision_s_t = precision_score(train_Y, pred_X_t)
    recall_s_t = recall_score(train_Y, pred_X_t)
    f1_s_t = f1_score(train_Y, pred_X_t)
    # confusion_matrix(y_true, y_pred).ravel() returns tn, fp, fn, tp for binary labels
    true_negative, false_positive, false_negative, true_positive = confusion_matrix(test_Y, pred_X).ravel()
    true_negative_t, false_positive_t, false_negative_t, true_positive_t = confusion_matrix(train_Y, pred_X_t).ravel()
    p_score_array_testdata.append([accuracy_s, precision_s, recall_s, f1_s])
    p_score_array_traindata.append([accuracy_s_t, precision_s_t, recall_s_t, f1_s_t])
# Scores on the test set
p_score_df = pd.DataFrame(p_score_array_testdata, columns=["accuracy", "precision", "recall", "F_value"])
p_score_df.plot()
p_score_df.describe()

# In[]:
# Scores on the training set
p_score_df_t = pd.DataFrame(p_score_array_traindata, columns=["accuracy", "precision", "recall", "F_value"])
p_score_df_t.plot()
p_score_df_t.describe()


# In[]:
# Logistic regression
from sklearn.linear_model import LogisticRegression

# X, Y data (df.example is a placeholder, as above)
X = df.example
Y = df.example

train_X, test_X, train_Y, test_Y = train_test_split(X, Y, random_state = 0)
p_score_array_testdata = []
p_score_array_traindata = []
for i in range(300):
    train_X, test_X, train_Y, test_Y = train_test_split(X, Y, random_state = i)
    lr = LogisticRegression()
    lr.fit(train_X, train_Y)
    pred_X = lr.predict(test_X)
    pred_X_t = lr.predict(train_X)
    accuracy_s = accuracy_score(test_Y, pred_X)
    precision_s = precision_score(test_Y, pred_X)
    recall_s = recall_score(test_Y, pred_X)
    f1_s = f1_score(test_Y, pred_X)
    accuracy_s_t = accuracy_score(train_Y, pred_X_t)
    precision_s_t = precision_score(train_Y, pred_X_t)
    recall_s_t = recall_score(train_Y, pred_X_t)
    f1_s_t = f1_score(train_Y, pred_X_t)
    # confusion_matrix(y_true, y_pred).ravel() returns tn, fp, fn, tp for binary labels
    true_negative, false_positive, false_negative, true_positive = confusion_matrix(test_Y, pred_X).ravel()
    true_negative_t, false_positive_t, false_negative_t, true_positive_t = confusion_matrix(train_Y, pred_X_t).ravel()
    p_score_array_testdata.append([accuracy_s, precision_s, recall_s, f1_s])
    p_score_array_traindata.append([accuracy_s_t, precision_s_t, recall_s_t, f1_s_t])
# Scores on the test set
p_score_df = pd.DataFrame(p_score_array_testdata, columns=["accuracy", "precision", "recall", "F_value"])
p_score_df.plot()
p_score_df.describe()

# In[]:
# Scores on the training set
p_score_df_t = pd.DataFrame(p_score_array_traindata, columns=["accuracy", "precision", "recall", "F_value"])
p_score_df_t.plot()
p_score_df_t.describe()
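
The 300-iteration loops above are essentially a hand-rolled repeated random split. One thing I might try next is scikit-learn's cross_val_score, which runs k-fold cross-validation in a couple of lines. A minimal sketch, assuming X and Y are the same data as above and that the labels are binary so that scoring="f1" makes sense:

from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier

rf = RandomForestClassifier(n_estimators=30)
# 5-fold cross-validation, reporting F1 on each held-out fold
scores = cross_val_score(rf, X, Y, cv=5, scoring="f1")
print(scores.mean(), scores.std())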