Commit: Rebuild

9bow committed Jun 13, 2024
1 parent e8ebbe5 commit 3685523
Showing 800 changed files with 330,395 additions and 282,836 deletions.
2 changes: 1 addition & 1 deletion docs/.buildinfo
@@ -1,4 +1,4 @@
# Sphinx build info version 1
# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
- config: 2ea50c3fc7521286641fcdb5b1d32edb
+ config: c57d324f2978132e4ba3dbcf923648de
tags: 645f666f9bcd5a90fca523b33c5a78b7
@@ -15,7 +15,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"\n# PyTorch: \ud150\uc11c(Tensor)\uc640 autograd\n\n$y=\\sin(x)$ \uc744 \uc608\uce21\ud560 \uc218 \uc788\ub3c4\ub85d, $-\\pi$ \ubd80\ud130 $\\pi$ \uae4c\uc9c0\n\uc720\ud074\ub9ac\ub4dc \uac70\ub9ac(Euclidean distance)\ub97c \ucd5c\uc18c\ud654\ud558\ub3c4\ub85d 3\ucc28 \ub2e4\ud56d\uc2dd\uc744 \ud559\uc2b5\ud569\ub2c8\ub2e4.\n\n\uc774 \uad6c\ud604\uc740 PyTorch \ud150\uc11c \uc5f0\uc0b0\uc744 \uc0ac\uc6a9\ud558\uc5ec \uc21c\uc804\ud30c \ub2e8\uacc4\ub97c \uacc4\uc0b0\ud558\uace0, PyTorch autograd\ub97c \uc0ac\uc6a9\ud558\uc5ec\n\ubcc0\ud654\ub3c4(gradient)\ub97c \uacc4\uc0b0\ud569\ub2c8\ub2e4.\n\nPyTorch \ud150\uc11c\ub294 \uc5f0\uc0b0 \uadf8\ub798\ud504\uc5d0\uc11c \ub178\ub4dc(node)\ub85c \ud45c\ud604\ub429\ub2c8\ub2e4. \ub9cc\uc57d ``x`` \uac00 ``x.requires_grad=True`` \uc778\n\ud150\uc11c\ub77c\uba74, ``x.grad`` \ub294 \uc5b4\ub5a4 \uc2a4\uce7c\ub77c \uac12\uc5d0 \ub300\ud55c ``x`` \uc758 \ubcc0\ud654\ub3c4\ub97c \uac16\ub294 \ub610\ub2e4\ub978 \ud150\uc11c\uc785\ub2c8\ub2e4.\n"
"PyTorch: \ud150\uc11c(Tensor)\uc640 autograd\n================================\n\n$y=\\sin(x)$ \uc744 \uc608\uce21\ud560 \uc218 \uc788\ub3c4\ub85d, $-\\pi$ \ubd80\ud130 $\\pi$ \uae4c\uc9c0 \uc720\ud074\ub9ac\ub4dc\n\uac70\ub9ac(Euclidean distance)\ub97c \ucd5c\uc18c\ud654\ud558\ub3c4\ub85d 3\ucc28 \ub2e4\ud56d\uc2dd\uc744 \ud559\uc2b5\ud569\ub2c8\ub2e4.\n\n\uc774 \uad6c\ud604\uc740 PyTorch \ud150\uc11c \uc5f0\uc0b0\uc744 \uc0ac\uc6a9\ud558\uc5ec \uc21c\uc804\ud30c \ub2e8\uacc4\ub97c \uacc4\uc0b0\ud558\uace0, PyTorch\nautograd\ub97c \uc0ac\uc6a9\ud558\uc5ec \ubcc0\ud654\ub3c4(gradient)\ub97c \uacc4\uc0b0\ud569\ub2c8\ub2e4.\n\nPyTorch \ud150\uc11c\ub294 \uc5f0\uc0b0 \uadf8\ub798\ud504\uc5d0\uc11c \ub178\ub4dc(node)\ub85c \ud45c\ud604\ub429\ub2c8\ub2e4. \ub9cc\uc57d `x` \uac00\n`x.requires_grad=True` \uc778 \ud150\uc11c\ub77c\uba74, `x.grad` \ub294 \uc5b4\ub5a4 \uc2a4\uce7c\ub77c \uac12\uc5d0 \ub300\ud55c\n`x` \uc758 \ubcc0\ud654\ub3c4\ub97c \uac16\ub294 \ub610\ub2e4\ub978 \ud150\uc11c\uc785\ub2c8\ub2e4.\n"
]
},
{
@@ -26,7 +26,7 @@
},
"outputs": [],
"source": [
"import torch\nimport math\n\ndtype = torch.float\ndevice = torch.device(\"cpu\")\n# device = torch.device(\"cuda:0\") # GPU\uc5d0\uc11c \uc2e4\ud589\ud558\ub824\uba74 \uc774 \uc8fc\uc11d\uc744 \uc81c\uac70\ud558\uc138\uc694\n\n# \uc785\ub825\uac12\uacfc \ucd9c\ub825\uac12\uc744 \uac16\ub294 \ud150\uc11c\ub4e4\uc744 \uc0dd\uc131\ud569\ub2c8\ub2e4.\n# requires_grad=False\uac00 \uae30\ubcf8\uac12\uc73c\ub85c \uc124\uc815\ub418\uc5b4 \uc5ed\uc804\ud30c \ub2e8\uacc4 \uc911\uc5d0 \uc774 \ud150\uc11c\ub4e4\uc5d0 \ub300\ud55c \ubcc0\ud654\ub3c4\ub97c\n# \uacc4\uc0b0\ud560 \ud544\uc694\uac00 \uc5c6\uc74c\uc744 \ub098\ud0c0\ub0c5\ub2c8\ub2e4.\nx = torch.linspace(-math.pi, math.pi, 2000, device=device, dtype=dtype)\ny = torch.sin(x)\n\n# \uac00\uc911\uce58\ub97c \uac16\ub294 \uc784\uc758\uc758 \ud150\uc11c\ub97c \uc0dd\uc131\ud569\ub2c8\ub2e4. 3\ucc28 \ub2e4\ud56d\uc2dd\uc774\ubbc0\ub85c 4\uac1c\uc758 \uac00\uc911\uce58\uac00 \ud544\uc694\ud569\ub2c8\ub2e4:\n# y = a + b x + c x^2 + d x^3\n# requires_grad=True\ub85c \uc124\uc815\ud558\uc5ec \uc5ed\uc804\ud30c \ub2e8\uacc4 \uc911\uc5d0 \uc774 \ud150\uc11c\ub4e4\uc5d0 \ub300\ud55c \ubcc0\ud654\ub3c4\ub97c \uacc4\uc0b0\ud560 \ud544\uc694\uac00\n# \uc788\uc74c\uc744 \ub098\ud0c0\ub0c5\ub2c8\ub2e4.\na = torch.randn((), device=device, dtype=dtype, requires_grad=True)\nb = torch.randn((), device=device, dtype=dtype, requires_grad=True)\nc = torch.randn((), device=device, dtype=dtype, requires_grad=True)\nd = torch.randn((), device=device, dtype=dtype, requires_grad=True)\n\nlearning_rate = 1e-6\nfor t in range(2000):\n # \uc21c\uc804\ud30c \ub2e8\uacc4: \ud150\uc11c\ub4e4 \uac04\uc758 \uc5f0\uc0b0\uc744 \uc0ac\uc6a9\ud558\uc5ec \uc608\uce21\uac12 y\ub97c \uacc4\uc0b0\ud569\ub2c8\ub2e4.\n y_pred = a + b * x + c * x ** 2 + d * x ** 3\n\n # \ud150\uc11c\ub4e4\uac04\uc758 \uc5f0\uc0b0\uc744 \uc0ac\uc6a9\ud558\uc5ec \uc190\uc2e4(loss)\uc744 \uacc4\uc0b0\ud558\uace0 \ucd9c\ub825\ud569\ub2c8\ub2e4.\n # \uc774 \ub54c \uc190\uc2e4\uc740 (1,) shape\uc744 \uac16\ub294 \ud150\uc11c\uc785\ub2c8\ub2e4.\n # loss.item() \uc73c\ub85c \uc190\uc2e4\uc774 \uac16\uace0 \uc788\ub294 \uc2a4\uce7c\ub77c \uac12\uc744 \uac00\uc838\uc62c \uc218 \uc788\uc2b5\ub2c8\ub2e4.\n loss = (y_pred - y).pow(2).sum()\n if t % 100 == 99:\n print(t, loss.item())\n\n # autograd \ub97c \uc0ac\uc6a9\ud558\uc5ec \uc5ed\uc804\ud30c \ub2e8\uacc4\ub97c \uacc4\uc0b0\ud569\ub2c8\ub2e4. 
\uc774\ub294 requires_grad=True\ub97c \uac16\ub294\n # \ubaa8\ub4e0 \ud150\uc11c\ub4e4\uc5d0 \ub300\ud55c \uc190\uc2e4\uc758 \ubcc0\ud654\ub3c4\ub97c \uacc4\uc0b0\ud569\ub2c8\ub2e4.\n # \uc774\ud6c4 a.grad\uc640 b.grad, c.grad, d.grad\ub294 \uac01\uac01 a, b, c, d\uc5d0 \ub300\ud55c \uc190\uc2e4\uc758 \ubcc0\ud654\ub3c4\ub97c\n # \uac16\ub294 \ud150\uc11c\uac00 \ub429\ub2c8\ub2e4.\n loss.backward()\n\n # \uacbd\uc0ac\ud558\uac15\ubc95(gradient descent)\uc744 \uc0ac\uc6a9\ud558\uc5ec \uac00\uc911\uce58\ub97c \uc9c1\uc811 \uac31\uc2e0\ud569\ub2c8\ub2e4.\n # torch.no_grad()\ub85c \uac10\uc2f8\ub294 \uc774\uc720\ub294, \uac00\uc911\uce58\ub4e4\uc774 requires_grad=True \uc9c0\ub9cc\n # autograd\uc5d0\uc11c\ub294 \uc774\ub97c \ucd94\uc801\ud558\uc9c0 \uc54a\uc744 \uac83\uc774\uae30 \ub54c\ubb38\uc785\ub2c8\ub2e4.\n with torch.no_grad():\n a -= learning_rate * a.grad\n b -= learning_rate * b.grad\n c -= learning_rate * c.grad\n d -= learning_rate * d.grad\n\n # \uac00\uc911\uce58 \uac31\uc2e0 \ud6c4\uc5d0\ub294 \ubcc0\ud654\ub3c4\ub97c \uc9c1\uc811 0\uc73c\ub85c \ub9cc\ub4ed\ub2c8\ub2e4.\n a.grad = None\n b.grad = None\n c.grad = None\n d.grad = None\n\nprint(f'Result: y = {a.item()} + {b.item()} x + {c.item()} x^2 + {d.item()} x^3')"
"import torch\nimport math\n\ndtype = torch.float\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\ntorch.set_default_device(device)\n\n# \uc785\ub825\uac12\uacfc \ucd9c\ub825\uac12\uc744 \uac16\ub294 \ud150\uc11c\ub4e4\uc744 \uc0dd\uc131\ud569\ub2c8\ub2e4.\n# requires_grad=False\uac00 \uae30\ubcf8\uac12\uc73c\ub85c \uc124\uc815\ub418\uc5b4 \uc5ed\uc804\ud30c \ub2e8\uacc4 \uc911\uc5d0 \uc774 \ud150\uc11c\ub4e4\uc5d0 \ub300\ud55c \ubcc0\ud654\ub3c4\ub97c\n# \uacc4\uc0b0\ud560 \ud544\uc694\uac00 \uc5c6\uc74c\uc744 \ub098\ud0c0\ub0c5\ub2c8\ub2e4.\nx = torch.linspace(-math.pi, math.pi, 2000, dtype=dtype)\ny = torch.sin(x)\n\n# \uac00\uc911\uce58\ub97c \uac16\ub294 \uc784\uc758\uc758 \ud150\uc11c\ub97c \uc0dd\uc131\ud569\ub2c8\ub2e4. 3\ucc28 \ub2e4\ud56d\uc2dd\uc774\ubbc0\ub85c 4\uac1c\uc758 \uac00\uc911\uce58\uac00 \ud544\uc694\ud569\ub2c8\ub2e4:\n# y = a + b x + c x^2 + d x^3\n# requires_grad=True\ub85c \uc124\uc815\ud558\uc5ec \uc5ed\uc804\ud30c \ub2e8\uacc4 \uc911\uc5d0 \uc774 \ud150\uc11c\ub4e4\uc5d0 \ub300\ud55c \ubcc0\ud654\ub3c4\ub97c \uacc4\uc0b0\ud560 \ud544\uc694\uac00\n# \uc788\uc74c\uc744 \ub098\ud0c0\ub0c5\ub2c8\ub2e4.\na = torch.randn((), dtype=dtype, requires_grad=True)\nb = torch.randn((), dtype=dtype, requires_grad=True)\nc = torch.randn((), dtype=dtype, requires_grad=True)\nd = torch.randn((), dtype=dtype, requires_grad=True)\n\nlearning_rate = 1e-6\nfor t in range(2000):\n # \uc21c\uc804\ud30c \ub2e8\uacc4: \ud150\uc11c\ub4e4 \uac04\uc758 \uc5f0\uc0b0\uc744 \uc0ac\uc6a9\ud558\uc5ec \uc608\uce21\uac12 y\ub97c \uacc4\uc0b0\ud569\ub2c8\ub2e4.\n y_pred = a + b * x + c * x ** 2 + d * x ** 3\n\n # \ud150\uc11c\ub4e4\uac04\uc758 \uc5f0\uc0b0\uc744 \uc0ac\uc6a9\ud558\uc5ec \uc190\uc2e4(loss)\uc744 \uacc4\uc0b0\ud558\uace0 \ucd9c\ub825\ud569\ub2c8\ub2e4.\n # \uc774 \ub54c \uc190\uc2e4\uc740 (1,) shape\uc744 \uac16\ub294 \ud150\uc11c\uc785\ub2c8\ub2e4.\n # loss.item() \uc73c\ub85c \uc190\uc2e4\uc774 \uac16\uace0 \uc788\ub294 \uc2a4\uce7c\ub77c \uac12\uc744 \uac00\uc838\uc62c \uc218 \uc788\uc2b5\ub2c8\ub2e4.\n loss = (y_pred - y).pow(2).sum()\n if t % 100 == 99:\n print(t, loss.item())\n\n # autograd \ub97c \uc0ac\uc6a9\ud558\uc5ec \uc5ed\uc804\ud30c \ub2e8\uacc4\ub97c \uacc4\uc0b0\ud569\ub2c8\ub2e4. \uc774\ub294 requires_grad=True\ub97c \uac16\ub294\n # \ubaa8\ub4e0 \ud150\uc11c\ub4e4\uc5d0 \ub300\ud55c \uc190\uc2e4\uc758 \ubcc0\ud654\ub3c4\ub97c \uacc4\uc0b0\ud569\ub2c8\ub2e4.\n # \uc774\ud6c4 a.grad\uc640 b.grad, c.grad, d.grad\ub294 \uac01\uac01 a, b, c, d\uc5d0 \ub300\ud55c \uc190\uc2e4\uc758 \ubcc0\ud654\ub3c4\ub97c\n # \uac16\ub294 \ud150\uc11c\uac00 \ub429\ub2c8\ub2e4.\n loss.backward()\n\n # \uacbd\uc0ac\ud558\uac15\ubc95(gradient descent)\uc744 \uc0ac\uc6a9\ud558\uc5ec \uac00\uc911\uce58\ub97c \uc9c1\uc811 \uac31\uc2e0\ud569\ub2c8\ub2e4.\n # torch.no_grad()\ub85c \uac10\uc2f8\ub294 \uc774\uc720\ub294, \uac00\uc911\uce58\ub4e4\uc774 requires_grad=True \uc9c0\ub9cc\n # autograd\uc5d0\uc11c\ub294 \uc774\ub97c \ucd94\uc801\ud558\uc9c0 \uc54a\uc744 \uac83\uc774\uae30 \ub54c\ubb38\uc785\ub2c8\ub2e4.\n with torch.no_grad():\n a -= learning_rate * a.grad\n b -= learning_rate * b.grad\n c -= learning_rate * c.grad\n d -= learning_rate * d.grad\n\n # \uac00\uc911\uce58 \uac31\uc2e0 \ud6c4\uc5d0\ub294 \ubcc0\ud654\ub3c4\ub97c \uc9c1\uc811 0\uc73c\ub85c \ub9cc\ub4ed\ub2c8\ub2e4.\n a.grad = None\n b.grad = None\n c.grad = None\n d.grad = None\n\nprint(f'Result: y = {a.item()} + {b.item()} x + {c.item()} x^2 + {d.item()} x^3')"
]
}
],
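The markdown cell above states the autograd contract this example relies on: a tensor created with requires_grad=True gets, after a backward pass from some scalar, the gradient of that scalar with respect to itself stored in its .grad attribute. A minimal standalone sketch of that behavior (not part of the notebook diff; the variable names are illustrative):

import torch

# Leaf tensor tracked by autograd.
x = torch.tensor(2.0, requires_grad=True)

# Compute a scalar from x; autograd records the operations.
s = 3 * x ** 2

# The backward pass populates x.grad with ds/dx.
s.backward()

print(x.grad)  # tensor(12.), since d(3x^2)/dx = 6x = 12 at x = 2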
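The substantive change in the code cell is device selection: the old cell hard-coded torch.device("cpu") and passed device= to every factory call, while the new cell picks CUDA when available and calls torch.set_default_device once, so later factory calls need no device= argument. A minimal sketch of the new pattern, assuming PyTorch 2.0 or later (where torch.set_default_device was introduced):

import torch

# Fall back to CPU when no GPU is present.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Every subsequent factory call (randn, linspace, ...) now allocates on
# `device` without an explicit device= argument.
torch.set_default_device(device)

t = torch.randn(3)
print(t.device)  # cuda:0 when a GPU is available, otherwise cpu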
@@ -46,7 +46,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.11"
"version": "3.10.14"
}
},
"nbformat": 4,