VEHICLE LIVE RISK PREDICTION #529

Merged
merged 9 commits into from
Jan 17, 2024
15,001 changes: 15,001 additions & 0 deletions Vehicle Live Risk Prediction/Dataset/Vehicle Risk Prediction Dataset.csv

Large diffs are not rendered by default.
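Since GitHub does not render a diff this large, reviewers who want to sanity-check the data can load it locally. A minimal sketch, assuming the repo-relative path shown in the diff header and that the 15,001 added lines are a header row plus 15,000 records:

import pandas as pd

# Path copied from the diff header above; adjust to where the repo is checked out
df = pd.read_csv("Vehicle Live Risk Prediction/Dataset/Vehicle Risk Prediction Dataset.csv")

print(df.shape)                      # expecting roughly (15000, n_columns)
print(df.dtypes)                     # the column names and types the notebook relies on
print(df["Risk_Score"].describe())   # the notebook thresholds this column at 50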

Binary file added Vehicle Live Risk Prediction/Images/1.png
Binary file added Vehicle Live Risk Prediction/Images/2.png
Binary file added Vehicle Live Risk Prediction/Images/3.png
158 changes: 158 additions & 0 deletions Vehicle Live Risk Prediction/Model/ANN_Based.ipynb
@@ -0,0 +1,158 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 5,
"id": "c26d8b33-9032-4972-91cb-70eedea7591d",
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"from sklearn.model_selection import train_test_split\n",
"from sklearn.preprocessing import StandardScaler\n",
"from sklearn.metrics import accuracy_score, classification_report\n",
"from tensorflow import keras\n",
"from tensorflow.keras import layers\n",
"from sklearn.preprocessing import LabelEncoder\n",
"\n",
"df = pd.read_csv(\"Downloads/Vehicle Risk Prediction Dataset.csv\")\n",
"\n",
"\n",
"\n",
"# Encode categorical features using LabelEncoder\n",
"le_visibility = LabelEncoder()\n",
"le_road_surface_conditions = LabelEncoder()\n",
"le_weather = LabelEncoder()\n",
"le_traffic_density = LabelEncoder()\n",
"le_road_hazards = LabelEncoder()\n",
"#le_time_of_day = LabelEncoder()\n",
"le_fatigue_level = LabelEncoder()\n",
"le_medical_condition = LabelEncoder()\n",
"le_speeding = LabelEncoder()\n",
"le_light= LabelEncoder()\n",
"#le_road_type=LabelEncoder()\n",
"#le_landscape=LabelEncoder()\n",
"\n",
"df['visibility_n'] = le_visibility.fit_transform(df['Visibility'])\n",
"df['road_surface_conditions_n'] = le_road_surface_conditions.fit_transform(df['Road_Surface_Conditions'])\n",
"df['weather_n'] = le_weather.fit_transform(df['Weather'])\n",
"df['traffic_density_n'] = le_traffic_density.fit_transform(df['Traffic_Density'])\n",
"df['road_hazards_n'] = le_road_hazards.fit_transform(df['Road_Hazards'])\n",
"#df['time_of_day_n'] = le_time_of_day.fit_transform(df['Time_of_Day'])\n",
"df['fatigue_level_n'] = le_fatigue_level.fit_transform(df['Fatigue_Level'])\n",
"df['medical_condition_n'] = le_medical_condition.fit_transform(df['Medical_Condition'])\n",
"df['speeding_n'] = le_speeding.fit_transform(df['Speeding'])\n",
"df['light_condition']=le_light.fit_transform(df['Light_Conditions'])\n",
"#df['roadtype'] = le_road_type.fit_transform(df['Road_Type'])\n",
"#df['landscape_n']=le_landscape.fit_transform(df['Landscape'])\n",
"\n",
"df = df.drop(['Light_Conditions', 'Road_Type', 'Landscape', 'Visibility', 'Road_Surface_Conditions', 'Weather', 'Traffic_Density', 'Road_Hazards', 'Time_of_Day', 'Fatigue_Level', 'Medical_Condition', 'Speeding','Driver_Age','Last_Service_Months_Ago'], axis='columns')\n"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "a355b768-dc55-4b75-a35a-9b7125797e72",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"2024-01-16 00:23:27.443768: I tensorflow/core/common_runtime/process_util.cc:146] Creating new thread pool with default inter op setting: 2. Tune using inter_op_parallelism_threads for best performance.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/2\n",
"1920/1920 [==============================] - 7s 3ms/step - loss: 0.0174 - accuracy: 0.9957 - val_loss: 0.0157 - val_accuracy: 0.9983\n",
"Epoch 2/2\n",
"1920/1920 [==============================] - 6s 3ms/step - loss: 4.1210e-04 - accuracy: 0.9999 - val_loss: 0.0180 - val_accuracy: 0.9983\n",
"94/94 [==============================] - 0s 2ms/step\n",
"Accuracy: 0.9996666666666667\n",
"\n",
"Classification Report:\n",
" precision recall f1-score support\n",
"\n",
" 0 1.00 1.00 1.00 1168\n",
" 1 1.00 1.00 1.00 1832\n",
"\n",
" accuracy 1.00 3000\n",
" macro avg 1.00 1.00 1.00 3000\n",
"weighted avg 1.00 1.00 1.00 3000\n",
"\n"
]
}
],
"source": [
"X= df.drop('Risk_Score', axis=1)\n",
"y = df['Risk_Score'].apply(lambda x: 1 if x > 50 else 0)\n",
"\n",
"# Split the data into training and testing sets\n",
"X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n",
"\n",
"# Standardize features (important for neural networks)\n",
"scaler = StandardScaler()\n",
"X_train_scaled = scaler.fit_transform(X_train)\n",
"X_test_scaled = scaler.transform(X_test)\n",
"\n",
"# Create an Artificial Neural Network model with dropout layers\n",
"model = keras.Sequential([\n",
" layers.Dense(64, activation='relu', input_dim=X_train.shape[1]),\n",
" layers.Dropout(0.2), # Add dropout layer\n",
" layers.Dense(32, activation='relu'),\n",
" layers.Dropout(0.2), # Add dropout layer\n",
" layers.Dense(1, activation='sigmoid') # For binary classification, sigmoid activation\n",
"])\n",
"\n",
"# Compile the model\n",
"model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n",
"\n",
"# Train the model\n",
"model.fit(X_train_scaled, y_train, epochs=2, batch_size=5, validation_split=0.2)\n",
"\n",
"# Make predictions on the test set\n",
"y_pred_proba = model.predict(X_test_scaled)\n",
"y_pred = (y_pred_proba > 0.5).astype(int)\n",
"\n",
"# Evaluate the model\n",
"accuracy = accuracy_score(y_test, y_pred)\n",
"class_report = classification_report(y_test, y_pred)\n",
"\n",
"print(\"Accuracy:\", accuracy)\n",
"print(\"\\nClassification Report:\\n\", class_report)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d165f77a-a6c5-41a1-8fb7-bb95b6e481a1",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.6"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
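The notebook trains and evaluates in a single session, but since the PR is about live prediction it is worth sketching how the fitted objects could score one incoming reading. The following is a minimal sketch, not part of the committed notebook: it assumes the `encoders` dict, `scaler`, `model`, and `X` from the cells above are still in memory, that the caller supplies every feature column, and that the sample values are made-up placeholders.

import pandas as pd

def predict_risk(raw: dict) -> float:
    """Return the model's high-risk probability for one raw observation.

    `raw` maps original column names (categorical and numeric) to values.
    """
    row = dict(raw)
    for col, enc in encoders.items():
        # transform() raises ValueError on labels unseen during fit;
        # a real live service would need a fallback for that case
        row[col.lower() + '_n'] = enc.transform([row.pop(col)])[0]
    features = pd.DataFrame([row])[X.columns]  # keep the training column order
    return float(model.predict(scaler.transform(features), verbose=0)[0, 0])

# Hypothetical reading; category values must match ones present in the CSV
sample = {'Visibility': 'Low', 'Road_Surface_Conditions': 'Wet', 'Weather': 'Rainy',
          'Traffic_Density': 'High', 'Road_Hazards': 'Pothole', 'Fatigue_Level': 'High',
          'Medical_Condition': 'None', 'Speeding': 'Yes', 'Light_Conditions': 'Night'}
print(predict_risk(sample))

Persisting the fitted objects (e.g. joblib.dump for the encoders and scaler, model.save for the network) would let the same function run outside the notebook.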