RF.py
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 19 14:14:37 2017
@author: user98
"""
import math

import numpy as np
import pandas as pd
from sklearn import linear_model  # only used by the commented-out logistic-regression baseline
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import log_loss
DIR = "../data/stock_train_data_20170910.csv"
COLUMNS = list(range(1, 91))  # columns 1-90: 88 features, weight, and label (column 0, presumably an id, is skipped)
all_set = pd.read_csv(DIR, skipinitialspace=True,
                      skiprows=0, usecols=COLUMNS).values  # .as_matrix() was removed from pandas; .values gives the same ndarray
TESTDIR = "../data/stock_test_data_20170910.csv"
# Reorder columns so the label (last read column) comes first: label, 88 features, weight.
SORT = list(range(0, 89))
SORT.insert(0, 89)
all_set = all_set[:, np.array(SORT)]
np.random.shuffle(all_set)
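# Hold out the last 30% of the shuffled rows for local validation.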
split = math.floor(all_set.shape[0] * 0.7)
training_set = all_set[:split]
prediction_set = all_set[split:]
SSD = list(range(1, 89))  # feature columns to read from the test file later
# Split off the per-sample weight (last column) from the label and feature columns.
training_weight = training_set[:, -1]
training_set = training_set[:, :-1]
prediction_weight = prediction_set[:, -1]
prediction_set = prediction_set[:, :-1]
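# Note: the sample weights extracted above are not passed to the model below. If weighting
# is wanted, RandomForestClassifier.fit accepts a sample_weight argument, e.g.
# clf.fit(X, y, sample_weight=training_weight) -- left here as an untested suggestion.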
# Logistic-regression baseline, kept for reference but not used:
#logreg = linear_model.LogisticRegression()
#logreg.fit(training_set[:,1:], training_set[:,0])
#predicted_class = logreg.predict(prediction_set[:,1:])
# Fit a random forest on the 70% training split (column 0 is the label, the rest are features).
clf = RandomForestClassifier(n_estimators=50000, criterion='gini', n_jobs=40, verbose=2)
clf.fit(training_set[:, 1:], training_set[:, 0])
#predicted_class = clf.predict(prediction_set[:, 1:])
# Score the held-out 30%: log loss of predicted class probabilities against the true labels.
predicted_proba = clf.predict_proba(prediction_set[:, 1:])
loss = log_loss(prediction_set[:, 0], predicted_proba)
with open("RFloss.txt", "w") as output:
    output.write(str(loss))
print(loss)
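# Retrain on the full training set, then predict probabilities for the real test file.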
#testdata: 321674 ~ 521619
training_set = all_set
prediction_set = pd.read_csv(TESTDIR, skipinitialspace=True,
                             skiprows=0, usecols=SSD).values
training_weight = training_set[:, -1]
training_set = training_set[:, :-1]
clf = RandomForestClassifier(n_estimators=50000, criterion='gini', n_jobs=40, verbose=2)
clf.fit(training_set[:, 1:], training_set[:, 0])
predictions = clf.predict_proba(prediction_set)
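# Build the submission: test ids paired with the predicted probability of class 1.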
indices = pd.read_csv(TESTDIR, skipinitialspace=True, skiprows=0, usecols=[0]).values.flatten()
df = pd.DataFrame(data={'id': indices, 'proba': predictions[:, 1]})
df.to_csv('result_notstan_RF.csv', index=False)
print('Result saved.')