testtest.py
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, classification_report
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import OneHotEncoder
from sklearn.svm import SVC
# Load your dataset
data_path = './load_management_dataset_india_500.csv'
data_frame = pd.read_csv(data_path)
# Print the column names to verify
print("Column names:", data_frame.columns)
# Check if the expected columns are present
expected_columns = ['time_of_day', 'day_of_week', 'season', 'temperature', 'humidity', 'historical_load', 'current_load']
missing_columns = [col for col in expected_columns if col not in data_frame.columns]
if missing_columns:
    raise KeyError(f"The following expected columns are missing from the dataset: {missing_columns}")
# Extract the relevant columns
X = data_frame[expected_columns]
# Extract the target variable
y = data_frame['load_label']
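# Optional check (a small assumed diagnostic): the label distribution shows whether
# plain accuracy is a fair metric; for skewed classes, passing stratify=y to the
# train_test_split call below keeps class proportions similar in both splits.
print("Class distribution:\n", y.value_counts())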
# Initialize OneHotEncoder
encoder = OneHotEncoder(sparse_output=False, drop='first') # Drop='first' to avoid multicollinearity
# Apply one-hot encoding to categorical features
encoded_features = encoder.fit_transform(X[['time_of_day', 'day_of_week', 'season']])
# Combine one-hot encoded features with other numerical features
numerical_features = X[['temperature', 'humidity', 'historical_load', 'current_load']].values
encoded_data = np.hstack((encoded_features, numerical_features))
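# Optional sanity check (a minimal sketch): listing the generated one-hot column
# names makes the column order inside encoded_data easy to verify
# (get_feature_names_out requires scikit-learn >= 1.0).
encoded_feature_names = encoder.get_feature_names_out(['time_of_day', 'day_of_week', 'season'])
print("One-hot encoded columns:", list(encoded_feature_names))
print("Final feature matrix shape:", encoded_data.shape)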
# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(encoded_data, y, test_size=0.2, random_state=42)
# Initialize the models
models = {
    'Logistic Regression': LogisticRegression(max_iter=1000),  # raised max_iter to help convergence on unscaled features
    'Random Forest': RandomForestClassifier(),
    'SVM': SVC(),
    'KNN': KNeighborsClassifier(),
}
# Train and evaluate each model
for name, model in models.items():
    model.fit(X_train, y_train)
    predictions = model.predict(X_test)
    accuracy = accuracy_score(y_test, predictions)
    print(f'{name} Accuracy: {accuracy:.2f}')
    print(classification_report(y_test, predictions))
# Cross-validation for more robust results
for name, model in models.items():
    cv_scores = cross_val_score(model, encoded_data, y, cv=5)
    print(f'{name} Cross-Validation Accuracy: {cv_scores.mean():.2f}')
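
# A leakage-free variant (a minimal sketch, assuming the same column names as above):
# wrapping preprocessing and model in a single Pipeline lets cross_val_score refit the
# encoder and scaler inside each fold instead of on the full dataset; the scaling step
# also tends to help the distance/margin-based models (SVC, KNN).
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

categorical_cols = ['time_of_day', 'day_of_week', 'season']
numerical_cols = ['temperature', 'humidity', 'historical_load', 'current_load']
preprocessor = ColumnTransformer([
    # handle_unknown='ignore' guards against categories that appear only in a fold's
    # validation split; drop='first' is omitted here for simplicity.
    ('cat', OneHotEncoder(handle_unknown='ignore', sparse_output=False), categorical_cols),
    ('num', StandardScaler(), numerical_cols),
])
for name, model in models.items():
    pipeline = Pipeline([('preprocess', preprocessor), ('classifier', model)])
    pipeline_scores = cross_val_score(pipeline, X, y, cv=5)
    print(f'{name} Pipeline Cross-Validation Accuracy: {pipeline_scores.mean():.2f}')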