Pytorch_1
{"metadata":{"kernelspec":{"language":"python","display_name":"Python 3","name":"python3"},"language_info":{"name":"python","version":"3.10.12","mimetype":"text/x-python","codemirror_mode":{"name":"ipython","version":3},"pygments_lexer":"ipython3","nbconvert_exporter":"python","file_extension":".py"},"kaggle":{"accelerator":"gpu","dataSources":[{"sourceId":10312176,"sourceType":"datasetVersion","datasetId":6383797}],"dockerImageVersionId":30823,"isInternetEnabled":true,"language":"python","sourceType":"notebook","isGpuEnabled":true}},"nbformat_minor":4,"nbformat":4,"cells":[{"cell_type":"code","source":"# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load\n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\n# Input data files are available in the read-only \"../input/\" directory\n# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory\n\nimport os\nfor dirname, _, filenames in os.walk('/kaggle/input'):\n for filename in filenames:\n print(os.path.join(dirname, filename))\n\n# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using \"Save & Run All\" \n# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session","metadata":{"_uuid":"8f2839f25d086af736a60e9eeb907d3b93b6e0e5","_cell_guid":"b1076dfc-b9ad-4769-8c92-a6c4dae69d19","trusted":true,"execution":{"iopub.status.busy":"2025-01-05T16:38:59.273762Z","iopub.execute_input":"2025-01-05T16:38:59.274130Z","iopub.status.idle":"2025-01-05T16:39:00.139892Z","shell.execute_reply.started":"2025-01-05T16:38:59.274103Z","shell.execute_reply":"2025-01-05T16:39:00.138950Z"}},"outputs":[{"name":"stdout","text":"/kaggle/input/mnist-digit-recognizer/Digit_Recognizer/sample_submission.csv\n/kaggle/input/mnist-digit-recognizer/Digit_Recognizer/train.csv\n/kaggle/input/mnist-digit-recognizer/Digit_Recognizer/test.csv\n","output_type":"stream"}],"execution_count":1},{"cell_type":"code","source":"dataset = pd.read_csv(\"/kaggle/input/mnist-digit-recognizer/Digit_Recognizer/train.csv\")\nXte = pd.read_csv(\"/kaggle/input/mnist-digit-recognizer/Digit_Recognizer/test.csv\")","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-01-05T16:39:00.140990Z","iopub.execute_input":"2025-01-05T16:39:00.141394Z","iopub.status.idle":"2025-01-05T16:39:04.365327Z","shell.execute_reply.started":"2025-01-05T16:39:00.141366Z","shell.execute_reply":"2025-01-05T16:39:04.364301Z"}},"outputs":[],"execution_count":2},{"cell_type":"code","source":"ytr = dataset.label\nXtr = dataset.drop(\"label\", axis=1)","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-01-05T16:39:04.366752Z","iopub.execute_input":"2025-01-05T16:39:04.367001Z","iopub.status.idle":"2025-01-05T16:39:04.457313Z","shell.execute_reply.started":"2025-01-05T16:39:04.366979Z","shell.execute_reply":"2025-01-05T16:39:04.456566Z"}},"outputs":[],"execution_count":3},{"cell_type":"code","source":"Xtr = Xtr.to_numpy()\nytr = ytr.to_numpy()\nXte = 
# --- Cell 5: quick look at the raw arrays ---
Xte, Xtr, ytr

# Output:
# (array([[0, 0, 0, ..., 0, 0, 0],
#         ...,
#         [0, 0, 0, ..., 0, 0, 0]]),
#  array([[0, 0, 0, ..., 0, 0, 0],
#         ...,
#         [0, 0, 0, ..., 0, 0, 0]]),
#  array([1, 0, 1, ..., 7, 6, 9]))

# --- Cell 6 ---
import torch

# --- Cell 7: move everything into float32 tensors ---
Xtr = torch.from_numpy(Xtr).to(dtype=torch.float32)
ytr = torch.from_numpy(ytr).to(dtype=torch.float32)
Xte = torch.from_numpy(Xte).to(dtype=torch.float32)

# --- Cell 8: the same data, now as tensors ---
Xte, Xtr, ytr

# Output:
# (tensor([[0., 0., 0., ..., 0., 0., 0.],
#          ...,
#          [0., 0., 0., ..., 0., 0., 0.]]),
#  tensor([[0., 0., 0., ..., 0., 0., 0.],
#          ...,
#          [0., 0., 0., ..., 0., 0., 0.]]),
#  tensor([1., 0., 1., ..., 7., 6., 9.]))

# --- Cell 9: a single-neuron model written with raw tensors ---
class NeuralNetwork():
    def __init__(self, X):
        # One weight per input pixel and a single output unit.
        self.weights = torch.rand(X.shape[1], 1, dtype=torch.float32, requires_grad=True)
        self.bias = torch.zeros(1, dtype=torch.float32, requires_grad=True)

    def forward_pass(self, X):
        # Linear score followed by a sigmoid: one probability per input row.
        z = torch.matmul(X, self.weights) + self.bias
        y_pred = torch.sigmoid(z)
        return y_pred

    def loss(self, y_pred, y):
        # Binary cross-entropy, clamped away from 0 and 1 for numerical stability.
        # Note: this loss assumes targets in {0, 1}.
        epsilon = 1e-7
        y_pred = torch.clamp(y_pred, epsilon, 1 - epsilon)
        loss = -(y * torch.log(y_pred) + (1 - y) * torch.log(1 - y_pred)).mean()
        return loss
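The hand-written loss is ordinary binary cross-entropy, so for genuinely binary targets it should agree with PyTorch's built-in F.binary_cross_entropy up to the 1e-7 clamp. A small self-contained check on synthetic data (the `_demo` names are illustrative, not from the notebook):

import torch
import torch.nn.functional as F

# Synthetic binary problem: 8 samples, 4 features, 0/1 targets (illustrative only).
X_demo = torch.rand(8, 4)
y_demo = torch.randint(0, 2, (8, 1)).float()

model_demo = NeuralNetwork(X_demo)
p = model_demo.forward_pass(X_demo)

manual = model_demo.loss(p, y_demo)           # hand-written BCE from Cell 9
builtin = F.binary_cross_entropy(p, y_demo)   # PyTorch's reference implementation
print(manual.item(), builtin.item())          # should match to within the clamp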
# --- Cell 10: hyperparameters ---
lr = 0.01
epochs = 30
batch_size = 128

# --- Cell 11: mini-batch loader over the training tensors ---
from torch.utils.data import DataLoader, TensorDataset
dataset = TensorDataset(Xtr, ytr)
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)

# --- Cell 12: manual training loop (forward, loss, backward, SGD update by hand) ---
# create model
model = NeuralNetwork(Xtr)

for epoch in range(epochs):
    epoch_loss = 0.0
    for batch_X, batch_y in dataloader:
        # forward pass
        y_pred = model.forward_pass(batch_X)
        # loss -- note: batch_y holds digit labels 0-9 with shape (batch,) while y_pred
        # has shape (batch, 1), so this binary cross-entropy is not well posed here;
        # see the negative, non-improving losses in the output below
        loss = model.loss(y_pred, batch_y)
        # backward pass
        loss.backward()
        # update parameters without tracking gradients
        with torch.no_grad():
            model.weights -= lr * model.weights.grad
            model.bias -= lr * model.bias.grad

            # reset the gradients for the next batch
            model.weights.grad.zero_()
            model.bias.grad.zero_()

        # accumulate loss
        epoch_loss += loss.item()
    # report the mean batch loss for the epoch
    print(f"Epoch {epoch + 1}/{epochs}, Loss: {epoch_loss / len(dataloader)}")

# Output:
# Epoch 1/30, Loss: -55.07186588183
# Epoch 2/30, Loss: -55.0533159250184
# Epoch 3/30, Loss: -55.05861594249412
# Epoch 4/30, Loss: -55.09571591409144
# Epoch 5/30, Loss: -55.12221586233214
# Epoch 6/30, Loss: -55.146065836619464
# Epoch 7/30, Loss: -55.15401585123821
# Epoch 8/30, Loss: -55.07981593123323
# Epoch 9/30, Loss: -55.09041594299502
# Epoch 10/30, Loss: -55.10101586199821
# Epoch 11/30, Loss: -55.10631586787911
# Epoch 12/30, Loss: -55.13546584804732
# Epoch 13/30, Loss: -55.10631586787911
# Epoch 14/30, Loss: -55.140765819143745
# Epoch 15/30, Loss: -55.10101591997233
# Epoch 16/30, Loss: -55.138115879974826
# Epoch 17/30, Loss: -55.10101591997233
# Epoch 18/30, Loss: -55.140765853928215
# Epoch 19/30, Loss: -55.040065933505815
# Epoch 20/30, Loss: -55.06656593392323
# Epoch 21/30, Loss: -55.10896592299627
# Epoch 22/30, Loss: -55.12221589711662
# Epoch 23/30, Loss: -55.11956588837876
# Epoch 24/30, Loss: -55.07716591090055
# Epoch 25/30, Loss: -55.12751582183374
# Epoch 26/30, Loss: -55.10896585342732
# Epoch 27/30, Loss: -55.06921593106626
# Epoch 28/30, Loss: -55.12486582469071
# Epoch 29/30, Loss: -55.06656591073358
# Epoch 30/30, Loss: -55.12486587107001
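The losses are negative and never improve because the binary cross-entropy above assumes targets in {0, 1}, while batch_y carries the digit labels 0-9; the shape mismatch ((batch,) targets against (batch, 1) predictions) also broadcasts the loss to a batch-by-batch matrix before the mean. Below is a hedged sketch of how the same from-scratch idea extends to ten classes: one weight column per digit trained with softmax cross-entropy. This is not the notebook's code; the variable names and hyperparameters are illustrative.

import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader, TensorDataset

# Ten-class logistic regression: one weight column per digit (illustrative sketch).
W = torch.zeros(Xtr.shape[1], 10, requires_grad=True)
b = torch.zeros(10, requires_grad=True)

# Scale pixels to [0, 1] and keep integer class labels for cross_entropy.
loader = DataLoader(TensorDataset(Xtr / 255.0, ytr.long()), batch_size=128, shuffle=True)
opt = torch.optim.SGD([W, b], lr=0.1)

for epoch in range(5):                       # short run, illustrative only
    running = 0.0
    for xb, yb in loader:
        logits = xb @ W + b                  # (batch, 10) raw class scores
        loss = F.cross_entropy(logits, yb)   # softmax + negative log-likelihood in one call
        opt.zero_grad()
        loss.backward()
        opt.step()
        running += loss.item()
    print(f"epoch {epoch + 1}: mean loss {running / len(loader):.4f}")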
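With a ten-class model such as the sketch above, test-set predictions are the argmax over the ten class scores. A minimal submission sketch follows; it reuses the hypothetical W and b from the previous sketch and assumes the usual ImageId/Label column layout of sample_submission.csv, which the notebook never inspects.

import torch
import pandas as pd

# Predict on the test tensor (assumes W and b from the multi-class sketch above).
with torch.no_grad():
    test_logits = (Xte / 255.0) @ W + b
    preds = test_logits.argmax(dim=1)        # digit with the highest score per row

# Assumed submission layout: one ImageId per test row plus the predicted Label.
submission = pd.DataFrame({
    "ImageId": range(1, len(preds) + 1),
    "Label": preds.numpy(),
})
submission.to_csv("submission.csv", index=False)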