Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Adaptive Workflow Design #49 #51

Open
wants to merge 1 commit into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
220 changes: 220 additions & 0 deletions Adaptive Workflow Design/Adaptive_Workflow_Design.ipynb
Original file line number Diff line number Diff line change
@@ -0,0 +1,220 @@
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": []
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
}
},
"cells": [
{
"cell_type": "code",
"source": [],
"metadata": {
"id": "os-fssuNPbN2"
},
"execution_count": 1,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"1. Data Preparation & Predictive Modeling"
],
"metadata": {
"id": "NhV6qqJuP3Ed"
}
},
{
"cell_type": "code",
"source": [
"import pandas as pd\n",
"from sklearn.model_selection import train_test_split\n",
"from sklearn.ensemble import RandomForestRegressor\n",
"from sklearn.metrics import mean_squared_error\n",
"from sklearn.preprocessing import OneHotEncoder\n",
"\n",
"# Load historical data\n",
"data = pd.read_csv('/content/drive/MyDrive/GirlsScriptOpenSource/automarket/Adaptive Workflow Design/synthetic_workflow_data.csv')\n",
"\n",
"# Forward-fill missing values.\n",
"# fillna(method='ffill') is deprecated in pandas >= 2.1; use .ffill() and\n",
"# avoid inplace mutation so the cell stays idempotent on re-run.\n",
"data = data.ffill()\n",
"\n",
"# Separate features from the target BEFORE selecting by dtype.\n",
"# Selecting numeric columns from the full frame leaked the 'target' column\n",
"# into X, which is why the reported MSE was an implausible ~4.7e-06.\n",
"features = data.drop(columns=['target'])\n",
"numerical_features = features.select_dtypes(include=['number']).columns\n",
"categorical_features = features.select_dtypes(include=['object']).columns\n",
"\n",
"# Apply one-hot encoding to categorical features\n",
"encoder = OneHotEncoder(handle_unknown='ignore')\n",
"encoded_data = encoder.fit_transform(features[categorical_features])\n",
"encoded_df = pd.DataFrame(encoded_data.toarray(), columns=encoder.get_feature_names_out(categorical_features))\n",
"\n",
"# Combine numerical and encoded categorical features\n",
"X = pd.concat([features[numerical_features], encoded_df], axis=1)\n",
"y = data['target']\n",
"\n",
"# Split data into training and testing sets\n",
"X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n",
"\n",
"# Train a predictive model\n",
"model = RandomForestRegressor(n_estimators=100, random_state=42)\n",
"model.fit(X_train, y_train)\n",
"\n",
"# Validate the model\n",
"y_pred = model.predict(X_test)\n",
"mse = mean_squared_error(y_test, y_pred)\n",
"print(f'Mean Squared Error: {mse}')"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "wMOJp1_hQWWW",
"outputId": "0f87c75c-3c40-4388-9f81-6a346836cd87"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"2. Adaptive Algorithm Implementation"
],
"metadata": {
"id": "sdBOYA5mQf1K"
}
},
{
"cell_type": "code",
"source": [
"import numpy as np\n",
"\n",
"# Dynamically scale workflow parameters by their predicted adjustment factors.\n",
"# A parameter with no corresponding prediction is passed through unchanged.\n",
"def adjust_workflow(parameters, predictions):\n",
"    return {\n",
"        name: value * predictions[name] if name in predictions else value\n",
"        for name, value in parameters.items()\n",
"    }\n",
"\n",
"# Example usage\n",
"current_parameters = {'task1': 1.0, 'task2': 0.8, 'task3': 1.2}\n",
"predictions = {'task1': 1.1, 'task2': 0.9, 'task3': 1.05}\n",
"new_parameters = adjust_workflow(current_parameters, predictions)\n",
"print(new_parameters)\n"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "Kw6Zzx_tQFed",
"outputId": "336c46d0-584b-400b-a6c8-72faa744c6f4"
},
"execution_count": 5,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"{'task1': 1.1, 'task2': 0.7200000000000001, 'task3': 1.26}\n"
]
}
]
},
{
"cell_type": "code",
"source": [],
"metadata": {
"id": "I0TSAD6uQk99"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"3. Task Prioritization and Workflow Rerouting"
],
"metadata": {
"id": "BGsXaL7oQm2z"
}
},
{
"cell_type": "code",
"source": [
"from collections import deque\n",
"\n",
"# Sample workflow tasks and their priorities\n",
"tasks = deque([\n",
"    {'name': 'task1', 'priority': 1},\n",
"    {'name': 'task2', 'priority': 2},\n",
"    {'name': 'task3', 'priority': 3},\n",
"])\n",
"\n",
"# Return a new deque of tasks ordered by priority scaled by system load.\n",
"# Builds fresh task dicts instead of mutating the input in place: the\n",
"# previous version multiplied task['priority'] on every call, so re-running\n",
"# this cell compounded the scaling and corrupted the shared 'tasks' deque.\n",
"def prioritize_tasks(tasks, system_load):\n",
"    scaled = [{**task, 'priority': task['priority'] * system_load} for task in tasks]\n",
"    return deque(sorted(scaled, key=lambda x: x['priority'], reverse=True))\n",
"\n",
"# Example usage\n",
"system_load = 1.2\n",
"prioritized_tasks = prioritize_tasks(tasks, system_load)\n",
"print(prioritized_tasks)\n"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "sj91ecj5Qp8e",
"outputId": "7eb451b5-5b67-4f08-cfcf-98d5a903f4a5"
},
"execution_count": 6,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"deque([{'name': 'task3', 'priority': 3.5999999999999996}, {'name': 'task2', 'priority': 2.4}, {'name': 'task1', 'priority': 1.2}])\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"# Integration and real-time monitoring could be added with Kafka if needed;\n# for that, save this trained model and load it in the streaming service."
],
"metadata": {
"id": "iC15QxomQrQ4"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [],
"metadata": {
"id": "j9IK9crfQxMJ"
},
"execution_count": null,
"outputs": []
}
]
}
6 changes: 6 additions & 0 deletions Adaptive Workflow Design/requirement.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
# Important libraries used in this project (install with: pip install -r requirement.txt)
# pandas  -> pd, train/test data handling
# scikit-learn -> train_test_split, RandomForestRegressor, mean_squared_error, OneHotEncoder
# numpy   -> numerical utilities used in the notebook
pandas
scikit-learn
numpy
Loading