Commit f51cf9c

Add files via upload
1 parent f493c63 commit f51cf9c

File tree

1 file changed: +315 -0 lines changed


image-segmentation_widget.ipynb

+315
@@ -0,0 +1,315 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "<img src=\"header.jpg\">\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "<h2> <span style=\"color:#0000C0\">Introduction</span></h2>\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "<B>C</B>lustering is a machine learning technique, also used in data mining, that involves grouping data points. Given a set of data points, a clustering algorithm assigns each point to a specific group. In theory, data points in the same group should have similar properties and/or features, while data points in different groups should have highly dissimilar ones. Clustering is an unsupervised learning method and a common technique for statistical data analysis in many fields (AI, machine learning, data mining, ...).<br/>\n",
    "\n",
    "<B>I</B>n our case study, we apply k-means clustering to an image, then add noise to the image and reapply k-means, and finally filter out the noise and apply k-means to the filtered image.\n",
    "<B>L</B>ibraries used: <B>scikit-learn and OpenCV</B>"
   ]
  },
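  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "To make the idea of grouping data points concrete, the minimal sketch below clusters a handful of synthetic 2-D points with scikit-learn's KMeans; the points and the choice of k = 2 are arbitrary illustrations, not part of the image pipeline.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Toy example (illustration only): cluster six synthetic 2-D points into k=2 groups.\n",
    "import numpy as np\n",
    "from sklearn.cluster import KMeans\n",
    "\n",
    "points = np.array([[1.0, 1.0], [1.2, 0.8], [0.9, 1.1],   # one compact group\n",
    "                   [8.0, 8.0], [8.3, 7.9], [7.8, 8.2]])  # another compact group\n",
    "\n",
    "km = KMeans(n_clusters=2, n_init=10, random_state=0).fit(points)\n",
    "print('labels:', km.labels_)             # cluster index assigned to each point\n",
    "print('centers:', km.cluster_centers_)   # mean of each group"
   ]
  },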
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## <span style=\"color:#0000C0\">Step 1</span>: <B>Application of K-means on an Image (Image Segmentation)</B>\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Requirement already satisfied: opencv-python in c:\\users\\user\\anaconda3\\lib\\site-packages (4.4.0.44)\n",
      "Requirement already satisfied: numpy>=1.17.3 in c:\\users\\user\\anaconda3\\lib\\site-packages (from opencv-python) (1.18.5)\n",
      "Note: you may need to restart the kernel to use updated packages.\n"
     ]
    }
   ],
   "source": [
    "pip install opencv-python"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [],
   "source": [
    "from ipywidgets import interact, interactive, fixed, interact_manual\n",
    "import ipywidgets as widgets"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 59,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.cluster import KMeans\n",
    "import cv2\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "def segm(k):\n",
    "    # load the image and convert from OpenCV's BGR order to RGB for matplotlib\n",
    "    image = cv2.imread('boy.jpg')\n",
    "    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n",
    "    (h1, w1) = image.shape[:2]\n",
    "\n",
    "    # flatten the image into a list of pixels (one row per pixel, 3 channels)\n",
    "    pixels = image.reshape((h1 * w1, 3))\n",
    "\n",
    "    # cluster the pixel colours and replace each pixel by its cluster centre\n",
    "    clt = KMeans(n_clusters=k)\n",
    "    labels = clt.fit_predict(pixels)\n",
    "    quant = clt.cluster_centers_.astype(\"uint8\")[labels]\n",
    "\n",
    "    # reshape the quantized pixels back into an image and display it\n",
    "    quant = quant.reshape((h1, w1, 3))\n",
    "\n",
    "    plt.figure(figsize=(8, 8))\n",
    "    plt.imshow(quant)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 60,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "5c4fac37a2eb4a438528ca109148fe02",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "interactive(children=(IntSlider(value=2, description='k', max=30, min=2), Output()), _dom_classes=('widget-int…"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "interact(segm, k=widgets.IntSlider(min=2, max=30, step=1, value=2));"
   ]
  },
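  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "One rough way to choose a value of k for the slider above is to look at the k-means inertia (within-cluster sum of squares) for a few values of k. The sketch below does this on a random sample of pixels from 'boy.jpg'; the sample size and the range of k are arbitrary choices.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sketch: plot k-means inertia against k on a sample of pixels from 'boy.jpg'.\n",
    "# The sample size (5000 pixels) and the k range are arbitrary choices made for speed.\n",
    "import cv2\n",
    "import numpy as np\n",
    "from sklearn.cluster import KMeans\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "img = cv2.cvtColor(cv2.imread('boy.jpg'), cv2.COLOR_BGR2RGB)\n",
    "pixels = img.reshape(-1, 3).astype(np.float32)\n",
    "rng = np.random.default_rng(0)\n",
    "sample = pixels[rng.choice(len(pixels), size=min(5000, len(pixels)), replace=False)]\n",
    "\n",
    "ks = list(range(2, 11))\n",
    "inertias = [KMeans(n_clusters=k, n_init=10, random_state=0).fit(sample).inertia_ for k in ks]\n",
    "\n",
    "plt.plot(ks, inertias, marker='o')\n",
    "plt.xlabel('k')\n",
    "plt.ylabel('inertia')\n",
    "plt.title('k-means inertia vs. k');"
   ]
  },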
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## <span style=\"color:#0000C0\">Step 2</span>: <B>Add Noise to the Image</B>\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 62,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "9b96b564cc654f3f9c3e8c3b5032ed81",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "interactive(children=(Dropdown(description='mode', options=('gaussian', 'localvar', 'poisson', 'salt', 'pepper…"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "import skimage.io\n",
    "import skimage.util\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "def plotnoise(mode):\n",
    "    # load the image and scale it to [0, 1] before adding noise\n",
    "    img = skimage.io.imread('boy.jpg') / 255.0\n",
    "\n",
    "    if mode is not None:\n",
    "        # corrupt the image with the selected type of noise\n",
    "        gimg = skimage.util.random_noise(img, mode=mode)\n",
    "        plt.imshow(gimg)\n",
    "    else:\n",
    "        plt.imshow(img)\n",
    "    plt.title(mode)\n",
    "    plt.axis(\"off\")\n",
    "\n",
    "interact(plotnoise, mode=[\"gaussian\", \"localvar\", \"poisson\", \"salt\", \"pepper\", \"s&p\", \"speckle\"]);"
   ]
  },
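  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "As stated in the introduction, k-means can be reapplied to the noisy image. The sketch below does this for 'boy.jpg' corrupted with salt-and-pepper noise; the noise type is an arbitrary choice and the image is assumed to be a 3-channel colour image.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sketch: reapply k-means on a noisy copy of 'boy.jpg' (noise type is an assumption).\n",
    "import skimage.io\n",
    "import skimage.util\n",
    "from sklearn.cluster import KMeans\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "def segm_noisy(k):\n",
    "    img = skimage.io.imread('boy.jpg') / 255.0\n",
    "    noisy = skimage.util.random_noise(img, mode='s&p')\n",
    "\n",
    "    # flatten to one row per pixel (assumes a colour image)\n",
    "    h, w = noisy.shape[:2]\n",
    "    pixels = noisy.reshape(h * w, -1)\n",
    "\n",
    "    clt = KMeans(n_clusters=k, n_init=10)\n",
    "    labels = clt.fit_predict(pixels)\n",
    "    quant = clt.cluster_centers_[labels].reshape(h, w, -1)\n",
    "\n",
    "    plt.figure(figsize=(8, 8))\n",
    "    plt.imshow(quant)\n",
    "    plt.axis('off')\n",
    "\n",
    "interact(segm_noisy, k=widgets.IntSlider(min=2, max=30, step=1, value=2));"
   ]
  },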
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## <span style=\"color:#0000C0\">Step 3</span>: <B>Denoising</B>\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 94,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "0f105e4e681349f2bcae8877d2ef661d",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Button(description='denoising image', style=ButtonStyle())"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "7cf5c17503404757b9146e6132c308aa",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Output()"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "import cv2 as cv\n",
    "from matplotlib import pyplot as plt\n",
    "from IPython.display import display\n",
    "\n",
    "def blur_demo(image):\n",
    "    # simple averaging (mean) filter\n",
    "    return cv.blur(image, (3, 3))\n",
    "\n",
    "def boxFilter_demo(image):\n",
    "    # normalized box filter (equivalent to cv.blur)\n",
    "    return cv.boxFilter(image, -1, (3, 3), normalize=True)\n",
    "\n",
    "def boxFilterF_demo(image):\n",
    "    # unnormalized box filter (sums instead of averages, so bright regions saturate)\n",
    "    return cv.boxFilter(image, -1, (3, 3), normalize=False)\n",
    "\n",
    "def gaussian_demo(image):\n",
    "    # Gaussian smoothing with a 5x5 kernel\n",
    "    return cv.GaussianBlur(image, (5, 5), 1)\n",
    "\n",
    "def medianBlur_demo(image):\n",
    "    # median filter, well suited to salt-and-pepper noise\n",
    "    return cv.medianBlur(image, 5)\n",
    "\n",
    "button = widgets.Button(description=\"denoising image\")\n",
    "output = widgets.Output()\n",
    "display(button, output)\n",
    "\n",
    "def on_button_clicked(b):\n",
    "    with output:\n",
    "        # load the noisy image and convert BGR -> RGB for matplotlib\n",
    "        src = cv.imread(\"pepper.JPG\")\n",
    "        src = src[:, :, [2, 1, 0]]\n",
    "        results = [\n",
    "            (\"noised image\", src),\n",
    "            (\"blur_image\", blur_demo(src)),\n",
    "            (\"boxFilter_image\", boxFilter_demo(src)),\n",
    "            (\"boxFilterF_image\", boxFilterF_demo(src)),\n",
    "            (\"gaussian_image\", gaussian_demo(src)),\n",
    "            (\"medianBlur_image\", medianBlur_demo(src))\n",
    "        ]\n",
    "        plt.figure(figsize=(30, 20))\n",
    "        for i, (title, img) in enumerate(results, start=1):\n",
    "            plt.subplot(2, 3, i)\n",
    "            plt.imshow(img)\n",
    "            plt.title(title)\n",
    "            plt.xticks([]), plt.yticks([])\n",
    "        plt.show()\n",
    "\n",
    "button.on_click(on_button_clicked)"
   ]
  },
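  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Finally, as described in the introduction, k-means can be applied again after filtering out the noise. The sketch below denoises 'pepper.JPG' with a median filter (a good fit for salt-and-pepper noise) and then segments the result; the filter and its kernel size are arbitrary choices.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sketch: denoise 'pepper.JPG' with a median filter, then run k-means on the result.\n",
    "import cv2 as cv\n",
    "from sklearn.cluster import KMeans\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "def segm_denoised(k):\n",
    "    src = cv.imread('pepper.JPG')\n",
    "    src = cv.cvtColor(src, cv.COLOR_BGR2RGB)\n",
    "    denoised = cv.medianBlur(src, 5)\n",
    "\n",
    "    h, w = denoised.shape[:2]\n",
    "    pixels = denoised.reshape(h * w, 3)\n",
    "\n",
    "    clt = KMeans(n_clusters=k, n_init=10)\n",
    "    labels = clt.fit_predict(pixels)\n",
    "    quant = clt.cluster_centers_.astype('uint8')[labels].reshape(h, w, 3)\n",
    "\n",
    "    plt.figure(figsize=(8, 8))\n",
    "    plt.imshow(quant)\n",
    "    plt.axis('off')\n",
    "\n",
    "interact(segm_denoised, k=widgets.IntSlider(min=2, max=30, step=1, value=2));"
   ]
  }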
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
