Learning curves (example)

# In this exercise we'll examine a learner which has high variance and tries to learn
# nonexistent patterns in the data.
# Use the learning_curve function from sklearn.model_selection to plot learning
# curves of both the training and testing scores.

from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
# learning_curve and KFold now live in sklearn.model_selection; the old
# sklearn.learning_curve and sklearn.cross_validation modules have been removed.
from sklearn.model_selection import learning_curve, KFold
from sklearn.metrics import explained_variance_score, make_scorer
import numpy as np

# Set the learning curve parameters; you'll need these for learning_curve
size = 1000
cv = KFold(n_splits=3, shuffle=True)  # the old KFold(size, shuffle=True) defaulted to 3 folds
score = make_scorer(explained_variance_score)
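# make_scorer wraps a metric so learning_curve can call it as scorer(estimator, X, y);
# explained_variance_score is 1.0 for a perfect fit and can go negative for poor fits.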

# Create a series of data that forces a learner to have high variance
X = np.round(np.reshape(np.random.normal(scale=5, size=2 * size), (-1, 2)), 2)
y = np.sin(X[:, 0] + np.sin(X[:, 1]))  # 1-D target avoids shape warnings downstream
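# sin(x0 + sin(x1)) oscillates rapidly across the wide, normally distributed input
# range, so an unpruned tree tends to memorize individual points rather than the
# underlying surface -- hence the high variance.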

def plot_curve():
    reg = DecisionTreeRegressor()
    reg.fit(X, y)
    print("Regressor score: {:.4f}".format(reg.score(X, y)))

    # TODO: Use learning_curve imported above to create learning curves for both the
    # training data and testing data. You'll need 'size', 'cv' and 'score' from above.

    training_sizes, training_scores, testing_scores = learning_curve(
        reg, X, y, train_sizes=np.array([0.1, 0.33, 0.55, 0.78, 1.0]),
        cv=cv, scoring=score)

    # TODO: Plot the training curves and the testing curves
    # Use plt.plot twice -- one for each score. Be sure to give them labels!

    # learning_curve returns one score per CV fold at each training size,
    # so average across the folds before plotting.
    plt.grid()
    plt.plot(training_sizes, training_scores.mean(axis=1), 'o-', color="r",
             label="Training score")
    plt.plot(training_sizes, testing_scores.mean(axis=1), 'o-', color="g",
             label="Testing score")

    # Plot aesthetics
    plt.ylim(-0.1, 1.1)
    plt.ylabel("Curve Score")
    plt.xlabel("Training Points")
    plt.legend(loc="best")
    plt.show()

plot_curve()
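The plot makes the high variance visible: the unpruned tree scores near 1.0 on the
training folds while the testing score lags behind. As a follow-up sketch (not part
of the original exercise, and reusing X, y, cv and score from above), the same curves
can be drawn for a depth-limited tree; max_depth=4 is an arbitrary illustrative
value, not a tuned one.

def plot_curve_shallow():
    # Same learning curves, but for a depth-limited tree (assumed max_depth=4).
    shallow = DecisionTreeRegressor(max_depth=4)
    sizes, train_s, test_s = learning_curve(
        shallow, X, y, train_sizes=np.array([0.1, 0.33, 0.55, 0.78, 1.0]),
        cv=cv, scoring=score)
    plt.grid()
    plt.plot(sizes, train_s.mean(axis=1), 'o-', color="r", label="Training score")
    plt.plot(sizes, test_s.mean(axis=1), 'o-', color="g", label="Testing score")
    plt.ylim(-0.1, 1.1)
    plt.ylabel("Curve Score")
    plt.xlabel("Training Points")
    plt.legend(loc="best")
    plt.show()

plot_curve_shallow()

With the depth cap, the training score drops below 1.0 and the two curves typically
sit closer together, which is the signature of reduced variance (at the cost of
some bias).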
