{"id":1077555,"date":"2025-01-08T12:03:19","date_gmt":"2025-01-08T04:03:19","guid":{"rendered":"https:\/\/docs.pingcode.com\/ask\/ask-ask\/1077555.html"},"modified":"2025-01-08T12:03:22","modified_gmt":"2025-01-08T04:03:22","slug":"python%e4%b8%ad%e5%a6%82%e4%bd%95%e8%87%aa%e5%ae%9a%e4%b9%89%e4%bc%98%e5%8c%96%e5%99%a8-2","status":"publish","type":"post","link":"https:\/\/docs.pingcode.com\/ask\/1077555.html","title":{"rendered":"python\u4e2d\u5982\u4f55\u81ea\u5b9a\u4e49\u4f18\u5316\u5668"},"content":{"rendered":"<p style=\"text-align:center;\" ><img decoding=\"async\" src=\"https:\/\/cdn-kb.worktile.com\/kb\/wp-content\/uploads\/2024\/04\/24181541\/6b9f96df-0f8c-4b7d-9fa9-3a14ddfb5e35.webp\" alt=\"python\u4e2d\u5982\u4f55\u81ea\u5b9a\u4e49\u4f18\u5316\u5668\" \/><\/p>\n<p><p> <strong>\u81ea\u5b9a\u4e49\u4f18\u5316\u5668\u5728Python\u4e2d\u53ef\u4ee5\u901a\u8fc7\u7ee7\u627f\u4f18\u5316\u5668\u57fa\u7c7b\u3001\u5b9a\u4e49\u81ea\u5b9a\u4e49\u7684\u4f18\u5316\u65b9\u6cd5\u3001\u8bbe\u7f6e\u53c2\u6570\u66f4\u65b0\u89c4\u5219\u6765\u5b9e\u73b0<\/strong>\u3002\u5177\u4f53\u5b9e\u73b0\u6b65\u9aa4\u5982\u4e0b\uff1a\u9996\u5148\uff0c\u901a\u8fc7\u7ee7\u627fTensorFlow\u6216PyTorch\u7b49\u6df1\u5ea6\u5b66\u4e60\u6846\u67b6\u4e2d\u7684\u4f18\u5316\u5668\u57fa\u7c7b\uff0c\u6765\u521b\u5efa\u4e00\u4e2a\u65b0\u7684\u4f18\u5316\u5668\u7c7b\u3002\u5176\u6b21\uff0c\u5b9e\u73b0\u4f18\u5316\u7b97\u6cd5\uff0c\u901a\u8fc7\u7f16\u5199\u8ba1\u7b97\u68af\u5ea6\u7684\u51fd\u6570\u548c\u53c2\u6570\u66f4\u65b0\u7684\u89c4\u5219\u6765\u5b9a\u4e49\u4f18\u5316\u65b9\u6cd5\u3002\u6700\u540e\uff0c\u6839\u636e\u9700\u8981\u8c03\u6574\u4f18\u5316\u5668\u7684\u8d85\u53c2\u6570\u548c\u914d\u7f6e\u9009\u9879\u3002\u4e0b\u9762\u8be6\u7ec6\u4ecb\u7ecd\u5982\u4f55\u5728TensorFlow\u548cPyTorch\u4e2d\u81ea\u5b9a\u4e49\u4f18\u5316\u5668\u3002<\/p>\n<\/p>\n<p><h2>\u4e00\u3001\u5728TensorFlow\u4e2d\u81ea\u5b9a\u4e49\u4f18\u5316\u5668<\/h2>\n<\/p>\n<p><p>TensorFlow\u662f\u4e00\u4e2a\u5f3a\u5927\u7684\u6df1\u5ea6
\u5b66\u4e60\u6846\u67b6\uff0c\u652f\u6301\u7528\u6237\u81ea\u5b9a\u4e49\u4f18\u5316\u5668\u3002\u4ee5\u4e0b\u662f\u81ea\u5b9a\u4e49\u4f18\u5316\u5668\u7684\u6b65\u9aa4\u548c\u793a\u4f8b\u4ee3\u7801\u3002<\/p>\n<\/p>\n<p><h3>1\u3001\u7ee7\u627f\u4f18\u5316\u5668\u57fa\u7c7b<\/h3>\n<\/p>\n<p><p>\u9996\u5148\uff0c\u7ee7\u627f<code>tf.keras.optimizers.Optimizer<\/code>\u57fa\u7c7b\uff0c\u521b\u5efa\u4e00\u4e2a\u65b0\u7684\u4f18\u5316\u5668\u7c7b\u3002<\/p>\n<\/p>\n<p><pre><code class=\"language-python\">import tensorflow as tf<\/p>\n<p>class CustomOptimizer(tf.keras.optimizers.Optimizer):<\/p>\n<p>    def __init__(self, learning_rate=0.01, name=&quot;CustomOptimizer&quot;, **kwargs):<\/p>\n<p>        super().__init__(name, **kwargs)<\/p>\n<p>        self.learning_rate = learning_rate<\/p>\n<p><\/code><\/pre>\n<\/p>\n<p><h3>2\u3001\u5b9e\u73b0\u4f18\u5316\u7b97\u6cd5<\/h3>\n<\/p>\n<p><p>\u5728\u81ea\u5b9a\u4e49\u4f18\u5316\u5668\u7c7b\u4e2d\uff0c\u5b9e\u73b0<code>_create_slots<\/code>\u548c<code>_resource_apply_dense<\/code>\u65b9\u6cd5\u3002<code>_create_slots<\/code>\u7528\u4e8e\u521b\u5efa\u4f18\u5316\u5668\u6240\u9700\u7684\u53d8\u91cf\uff08\u5982\u52a8\u91cf\u3001\u4e8c\u9636\u52a8\u91cf\u7b49\uff09\uff0c<code>_resource_apply_dense<\/code>\u7528\u4e8e\u5b9a\u4e49\u53c2\u6570\u66f4\u65b0\u89c4\u5219\u3002<\/p>\n<\/p>\n<p><pre><code class=\"language-python\">class CustomOptimizer(tf.keras.optimizers.Optimizer):<\/p>\n<p>    def __init__(self, learning_rate=0.01, name=&quot;CustomOptimizer&quot;, **kwargs):<\/p>\n<p>        super().__init__(name, **kwargs)<\/p>\n<p>        self.learning_rate = learning_rate<\/p>\n<p>    def _create_slots(self, var_list):<\/p>\n<p>        for var in var_list:<\/p>\n<p>            self.add_slot(var, &quot;m&quot;)  # \u521b\u5efa\u52a8\u91cf\u53d8\u91cf<\/p>\n<p>    def _resource_apply_dense(self, grad, var, apply_state=None):<\/p>\n<p>        lr = self.learning_rate<\/p>\n<p>        m = self.get_slot(var, &quot;m&quot;)<\/p>\n<p>        new_m 
= m.assign(m * 0.9 + grad * 0.1)<\/p>\n<p>        var_update = var.assign_sub(lr * new_m)<\/p>\n<p>        return var_update<\/p>\n<p><\/code><\/pre>\n<\/p>\n<p><h3>3\u3001\u4f7f\u7528\u81ea\u5b9a\u4e49\u4f18\u5316\u5668<\/h3>\n<\/p>\n<p><p>\u521b\u5efa\u6a21\u578b\u5e76\u4f7f\u7528\u81ea\u5b9a\u4e49\u4f18\u5316\u5668\u8fdb\u884c\u8bad\u7ec3\u3002<\/p>\n<\/p>\n<p><pre><code class=\"language-python\">model = tf.keras.Sequential([<\/p>\n<p>    tf.keras.layers.Dense(10, activation=&#39;relu&#39;),<\/p>\n<p>    tf.keras.layers.Dense(1, activation=&#39;sigmoid&#39;)<\/p>\n<p>])<\/p>\n<p>optimizer = CustomOptimizer(learning_rate=0.01)<\/p>\n<p>model.compile(optimizer=optimizer, loss=&#39;binary_crossentropy&#39;, metrics=[&#39;accuracy&#39;])<\/p>\n<h2><strong>\u5047\u8bbe\u6709\u8bad\u7ec3\u6570\u636eX_train\u548cy_train<\/strong><\/h2>\n<p>model.fit(X_train, y_train, epochs=10, batch_size=32)<\/p>\n<p><\/code><\/pre>\n<\/p>\n<p><h2>\u4e8c\u3001\u5728PyTorch\u4e2d\u81ea\u5b9a\u4e49\u4f18\u5316\u5668<\/h2>\n<\/p>\n<p><p>PyTorch\u4e5f\u662f\u4e00\u4e2a\u6d41\u884c\u7684\u6df1\u5ea6\u5b66\u4e60\u6846\u67b6\uff0c\u5b83\u7684\u7075\u6d3b\u6027\u548c\u6613\u7528\u6027\u4f7f\u5f97\u81ea\u5b9a\u4e49\u4f18\u5316\u5668\u53d8\u5f97\u7b80\u5355\u3002\u4ee5\u4e0b\u662f\u81ea\u5b9a\u4e49\u4f18\u5316\u5668\u7684\u6b65\u9aa4\u548c\u793a\u4f8b\u4ee3\u7801\u3002<\/p>\n<\/p>\n<p><h3>1\u3001\u7ee7\u627f\u4f18\u5316\u5668\u57fa\u7c7b<\/h3>\n<\/p>\n<p><p>\u9996\u5148\uff0c\u7ee7\u627f<code>torch.optim.Optimizer<\/code>\u57fa\u7c7b\uff0c\u521b\u5efa\u4e00\u4e2a\u65b0\u7684\u4f18\u5316\u5668\u7c7b\u3002<\/p>\n<\/p>\n<p><pre><code class=\"language-python\">import torch<\/p>\n<p>from torch.optim.optimizer import Optimizer, required<\/p>\n<p>class CustomOptimizer(Optimizer):<\/p>\n<p>    def __init__(self, params, lr=required):<\/p>\n<p>        defaults = dict(lr=lr)<\/p>\n<p>        super(CustomOptimizer, 
self).__init__(params, defaults)<\/p>\n<p><\/code><\/pre>\n<\/p>\n<p><h3>2\u3001\u5b9e\u73b0\u4f18\u5316\u7b97\u6cd5<\/h3>\n<\/p>\n<p><p>\u5728\u81ea\u5b9a\u4e49\u4f18\u5316\u5668\u7c7b\u4e2d\uff0c\u5b9e\u73b0<code>step<\/code>\u65b9\u6cd5\uff0c\u7528\u4e8e\u5b9a\u4e49\u53c2\u6570\u66f4\u65b0\u89c4\u5219\u3002<\/p>\n<\/p>\n<p><pre><code class=\"language-python\">class CustomOptimizer(Optimizer):<\/p>\n<p>    def __init__(self, params, lr=required):<\/p>\n<p>        defaults = dict(lr=lr)<\/p>\n<p>        super(CustomOptimizer, self).__init__(params, defaults)<\/p>\n<p>    def step(self, closure=None):<\/p>\n<p>        loss = None<\/p>\n<p>        if closure is not None:<\/p>\n<p>            loss = closure()<\/p>\n<p>        for group in self.param_groups:<\/p>\n<p>            for p in group[&#39;params&#39;]:<\/p>\n<p>                if p.grad is None:<\/p>\n<p>                    continue<\/p>\n<p>                grad = p.grad.data<\/p>\n<p>                state = self.state[p]<\/p>\n<p>                if len(state) == 0:<\/p>\n<p>                    state[&#39;step&#39;] = 0<\/p>\n<p>                    state[&#39;m&#39;] = torch.zeros_like(p.data)<\/p>\n<p>                state[&#39;step&#39;] += 1<\/p>\n<p>                m = state[&#39;m&#39;]<\/p>\n<p>                m.mul_(0.9).add_(0.1, grad)<\/p>\n<p>                p.data.add_(-group[&#39;lr&#39;], m)<\/p>\n<p>        return loss<\/p>\n<p><\/code><\/pre>\n<\/p>\n<p><h3>3\u3001\u4f7f\u7528\u81ea\u5b9a\u4e49\u4f18\u5316\u5668<\/h3>\n<\/p>\n<p><p>\u521b\u5efa\u6a21\u578b\u5e76\u4f7f\u7528\u81ea\u5b9a\u4e49\u4f18\u5316\u5668\u8fdb\u884c\u8bad\u7ec3\u3002<\/p>\n<\/p>\n<p><pre><code class=\"language-python\">model = torch.nn.Sequential(<\/p>\n<p>    torch.nn.Linear(10, 10),<\/p>\n<p>    torch.nn.ReLU(),<\/p>\n<p>    torch.nn.Linear(10, 1),<\/p>\n<p>    torch.nn.Sigmoid()<\/p>\n<p>)<\/p>\n<p>optimizer = CustomOptimizer(model.parameters(), lr=0.01)<\/p>\n<p>criterion = 
torch.nn.BCELoss()<\/p>\n<h2><strong>\u5047\u8bbe\u6709\u8bad\u7ec3\u6570\u636eX_train\u548cy_train<\/strong><\/h2>\n<p>for epoch in range(10):<\/p>\n<p>    optimizer.zero_grad()<\/p>\n<p>    output = model(X_train)<\/p>\n<p>    loss = criterion(output, y_train)<\/p>\n<p>    loss.backward()<\/p>\n<p>    optimizer.step()<\/p>\n<p><\/code><\/pre>\n<\/p>\n<p><h2>\u4e09\u3001\u8d85\u53c2\u6570\u8c03\u6574\u548c\u4f18\u5316\u5668\u914d\u7f6e<\/h2>\n<\/p>\n<p><p>\u5728\u81ea\u5b9a\u4e49\u4f18\u5316\u5668\u4e2d\uff0c\u8d85\u53c2\u6570\uff08\u5982\u5b66\u4e60\u7387\u3001\u52a8\u91cf\u7cfb\u6570\u7b49\uff09\u7684\u9009\u62e9\u548c\u8c03\u6574\u975e\u5e38\u91cd\u8981\u3002\u53ef\u4ee5\u901a\u8fc7\u4f18\u5316\u5668\u7684\u6784\u9020\u51fd\u6570\u4f20\u9012\u8fd9\u4e9b\u8d85\u53c2\u6570\uff0c\u5e76\u5728\u4f18\u5316\u7b97\u6cd5\u4e2d\u4f7f\u7528\u5b83\u4eec\u3002<\/p>\n<\/p>\n<p><h3>1\u3001\u8c03\u6574\u5b66\u4e60\u7387<\/h3>\n<\/p>\n<p><p>\u5b66\u4e60\u7387\u662f\u63a7\u5236\u53c2\u6570\u66f4\u65b0\u5e45\u5ea6\u7684\u5173\u952e\u8d85\u53c2\u6570\u3002\u5728\u81ea\u5b9a\u4e49\u4f18\u5316\u5668\u4e2d\uff0c\u53ef\u4ee5\u901a\u8fc7\u4f20\u9012\u5b66\u4e60\u7387\u53c2\u6570\u6765\u8c03\u6574\u5b66\u4e60\u7387\u3002<\/p>\n<\/p>\n<p><pre><code class=\"language-python\">class CustomOptimizer(tf.keras.optimizers.Optimizer):<\/p>\n<p>    def __init__(self, learning_rate=0.01, name=&quot;CustomOptimizer&quot;, **kwargs):<\/p>\n<p>        super().__init__(name, **kwargs)<\/p>\n<p>        self.learning_rate = learning_rate<\/p>\n<p>    def _resource_apply_dense(self, grad, var, apply_state=None):<\/p>\n<p>        lr = self.learning_rate  # \u4f7f\u7528\u4f20\u9012\u7684\u5b66\u4e60\u7387\u53c2\u6570<\/p>\n<p>        m = self.get_slot(var, &quot;m&quot;)<\/p>\n<p>        new_m = m.assign(m * 0.9 + grad * 0.1)<\/p>\n<p>        var_update = var.assign_sub(lr * new_m)<\/p>\n<p>        return 
var_update<\/p>\n<p><\/code><\/pre>\n<\/p>\n<p><h3>2\u3001\u4f7f\u7528\u8c03\u5ea6\u5668\u52a8\u6001\u8c03\u6574\u5b66\u4e60\u7387<\/h3>\n<\/p>\n<p><p>\u5728\u8bad\u7ec3\u8fc7\u7a0b\u4e2d\uff0c\u53ef\u4ee5\u4f7f\u7528\u5b66\u4e60\u7387\u8c03\u5ea6\u5668\u6765\u52a8\u6001\u8c03\u6574\u5b66\u4e60\u7387\u3002TensorFlow\u548cPyTorch\u90fd\u63d0\u4f9b\u4e86\u591a\u79cd\u5b66\u4e60\u7387\u8c03\u5ea6\u5668\u3002<\/p>\n<\/p>\n<p><h4>TensorFlow\u4e2d\u7684\u5b66\u4e60\u7387\u8c03\u5ea6\u5668\u793a\u4f8b\uff1a<\/h4>\n<\/p>\n<p><pre><code class=\"language-python\">initial_learning_rate = 0.01<\/p>\n<p>lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(<\/p>\n<p>    initial_learning_rate,<\/p>\n<p>    decay_steps=10000,<\/p>\n<p>    decay_rate=0.96,<\/p>\n<p>    staircase=True<\/p>\n<p>)<\/p>\n<p>optimizer = CustomOptimizer(learning_rate=lr_schedule)<\/p>\n<p><\/code><\/pre>\n<\/p>\n<p><h4>PyTorch\u4e2d\u7684\u5b66\u4e60\u7387\u8c03\u5ea6\u5668\u793a\u4f8b\uff1a<\/h4>\n<\/p>\n<p><pre><code class=\"language-python\">optimizer = CustomOptimizer(model.parameters(), lr=0.01)<\/p>\n<p>scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)<\/p>\n<p>for epoch in range(100):<\/p>\n<p>    # \u8bad\u7ec3\u4ee3\u7801<\/p>\n<p>    scheduler.step()<\/p>\n<p><\/code><\/pre>\n<\/p>\n<p><h3>3\u3001\u914d\u7f6e\u5176\u4ed6\u8d85\u53c2\u6570<\/h3>\n<\/p>\n<p><p>\u9664\u4e86\u5b66\u4e60\u7387\uff0c\u8fd8\u53ef\u4ee5\u914d\u7f6e\u5176\u4ed6\u8d85\u53c2\u6570\uff0c\u5982\u52a8\u91cf\u7cfb\u6570\u3001\u6743\u91cd\u8870\u51cf\u7b49\u3002\u53ef\u4ee5\u5c06\u8fd9\u4e9b\u8d85\u53c2\u6570\u6dfb\u52a0\u5230\u4f18\u5316\u5668\u7684\u6784\u9020\u51fd\u6570\u4e2d\uff0c\u5e76\u5728\u4f18\u5316\u7b97\u6cd5\u4e2d\u4f7f\u7528\u5b83\u4eec\u3002<\/p>\n<\/p>\n<p><pre><code class=\"language-python\">class CustomOptimizer(Optimizer):<\/p>\n<p>    def __init__(self, params, lr=required, momentum=0.9, weight_decay=0):<\/p>\n<p>        defaults = dict(lr=lr, momentum=momentum, 
weight_decay=weight_decay)<\/p>\n<p>        super(CustomOptimizer, self).__init__(params, defaults)<\/p>\n<p>    def step(self, closure=None):<\/p>\n<p>        loss = None<\/p>\n<p>        if closure is not None:<\/p>\n<p>            loss = closure()<\/p>\n<p>        for group in self.param_groups:<\/p>\n<p>            for p in group[&#39;params&#39;]:<\/p>\n<p>                if p.grad is None:<\/p>\n<p>                    continue<\/p>\n<p>                grad = p.grad.data<\/p>\n<p>                state = self.state[p]<\/p>\n<p>                if len(state) == 0:<\/p>\n<p>                    state[&#39;step&#39;] = 0<\/p>\n<p>                    state[&#39;m&#39;] = torch.zeros_like(p.data)<\/p>\n<p>                state[&#39;step&#39;] += 1<\/p>\n<p>                m = state[&#39;m&#39;]<\/p>\n<p>                m.mul_(group[&#39;momentum&#39;]).add_(1 - group[&#39;momentum&#39;], grad)<\/p>\n<p>                p.data.add_(-group[&#39;lr&#39;], m)<\/p>\n<p>                if group[&#39;weight_decay&#39;] != 0:<\/p>\n<p>                    p.data.add_(-group[&#39;weight_decay&#39;], p.data)<\/p>\n<p>        return 
loss<\/p>\n<p><\/code><\/pre>\n<\/p>\n<p><h2>\u56db\u3001\u603b\u7ed3<\/h2>\n<\/p>\n<p><p>\u81ea\u5b9a\u4e49\u4f18\u5316\u5668\u5728\u6df1\u5ea6\u5b66\u4e60\u6a21\u578b\u8bad\u7ec3\u4e2d\u5177\u6709\u91cd\u8981\u4f5c\u7528\u3002\u901a\u8fc7\u7ee7\u627f\u4f18\u5316\u5668\u57fa\u7c7b\u3001\u5b9e\u73b0\u4f18\u5316\u7b97\u6cd5\u3001\u8bbe\u7f6e\u53c2\u6570\u66f4\u65b0\u89c4\u5219\uff0c\u53ef\u4ee5\u5728TensorFlow\u548cPyTorch\u4e2d\u521b\u5efa\u81ea\u5b9a\u4e49\u4f18\u5316\u5668\u3002\u5728\u81ea\u5b9a\u4e49\u4f18\u5316\u5668\u4e2d\uff0c\u53ef\u4ee5\u6839\u636e\u9700\u8981\u8c03\u6574\u5b66\u4e60\u7387\u3001\u52a8\u91cf\u7cfb\u6570\u3001\u6743\u91cd\u8870\u51cf\u7b49\u8d85\u53c2\u6570\uff0c\u5e76\u4f7f\u7528\u5b66\u4e60\u7387\u8c03\u5ea6\u5668\u52a8\u6001\u8c03\u6574\u5b66\u4e60\u7387\u3002\u901a\u8fc7\u5408\u7406\u914d\u7f6e\u548c\u8c03\u6574\u4f18\u5316\u5668\u7684\u8d85\u53c2\u6570\uff0c\u53ef\u4ee5\u63d0\u9ad8\u6a21\u578b\u7684\u8bad\u7ec3\u6548\u679c\u548c\u6536\u655b\u901f\u5ea6\u3002<\/p>\n<\/p>\n<p><p>\u81ea\u5b9a\u4e49\u4f18\u5316\u5668\u53ef\u4ee5\u5e2e\u52a9\u7814\u7a76\u4eba\u5458\u548c\u5de5\u7a0b\u5e08\u63a2\u7d22\u65b0\u7684\u4f18\u5316\u65b9\u6cd5\uff0c\u89e3\u51b3\u7279\u5b9a\u95ee\u9898\uff0c\u63d0\u9ad8\u6a21\u578b\u6027\u80fd\u3002\u5728\u5b9e\u8df5\u4e2d\uff0c\u53ef\u4ee5\u6839\u636e\u5177\u4f53\u9700\u6c42\u548c\u6570\u636e\u7279\u70b9\uff0c\u8bbe\u8ba1\u548c\u5b9e\u73b0\u9002\u5408\u7684\u4f18\u5316\u7b97\u6cd5\uff0c\u5145\u5206\u53d1\u6325\u81ea\u5b9a\u4e49\u4f18\u5316\u5668\u7684\u4f18\u52bf\u3002<\/p>\n<\/p>\n<h2><strong>\u76f8\u5173\u95ee\u7b54FAQs\uff1a<\/strong><\/h2>\n<p> <strong>\u5982\u4f55\u5728Python\u4e2d\u521b\u5efa\u4e00\u4e2a\u81ea\u5b9a\u4e49\u4f18\u5316\u5668\uff1f<\/strong><br 
\/>\u5728Python\u4e2d\u521b\u5efa\u81ea\u5b9a\u4e49\u4f18\u5316\u5668\u901a\u5e38\u6d89\u53ca\u5230\u7ee7\u627f\u73b0\u6709\u4f18\u5316\u5668\u7c7b\u5e76\u91cd\u5199\u76f8\u5173\u65b9\u6cd5\u3002\u53ef\u4ee5\u4ece\u6df1\u5ea6\u5b66\u4e60\u6846\u67b6\u5982TensorFlow\u6216PyTorch\u4e2d\u627e\u5230\u4f18\u5316\u5668\u7684\u57fa\u7c7b\u3002\u9996\u5148\uff0c\u5b9a\u4e49\u4e00\u4e2a\u65b0\u7684\u7c7b\u5e76\u7ee7\u627f\u81ea\u57fa\u7840\u4f18\u5316\u5668\u7c7b\uff0c\u7136\u540e\u5b9e\u73b0\u5fc5\u8981\u7684\u65b9\u6cd5\uff0c\u5982\u521d\u59cb\u5316\u53c2\u6570\u3001\u66f4\u65b0\u6743\u91cd\u548c\u8ba1\u7b97\u68af\u5ea6\u7b49\u3002\u786e\u4fdd\u5728\u8bbe\u8ba1\u65f6\u8003\u8651\u5230\u5b66\u4e60\u7387\u3001\u52a8\u91cf\u7b49\u8d85\u53c2\u6570\u7684\u7075\u6d3b\u6027\uff0c\u4ee5\u4fbf\u7528\u6237\u53ef\u4ee5\u6839\u636e\u9700\u6c42\u8fdb\u884c\u8c03\u6574\u3002<\/p>\n<p><strong>\u81ea\u5b9a\u4e49\u4f18\u5316\u5668\u65f6\u9700\u8981\u6ce8\u610f\u54ea\u4e9b\u53c2\u6570\u8bbe\u7f6e\uff1f<\/strong><br \/>\u5728\u81ea\u5b9a\u4e49\u4f18\u5316\u5668\u65f6\uff0c\u91cd\u8981\u7684\u53c2\u6570\u5305\u62ec\u5b66\u4e60\u7387\u3001\u52a8\u91cf\u3001\u8870\u51cf\u7387\u548c\u4f18\u5316\u7b97\u6cd5\u7684\u7279\u5b9a\u53c2\u6570\u3002\u5b66\u4e60\u7387\u51b3\u5b9a\u4e86\u6743\u91cd\u66f4\u65b0\u7684\u6b65\u5e45\uff0c\u52a8\u91cf\u53ef\u4ee5\u5e2e\u52a9\u52a0\u901f\u6536\u655b\uff0c\u800c\u8870\u51cf\u7387\u5219\u6709\u52a9\u4e8e\u5728\u8bad\u7ec3\u8fc7\u7a0b\u4e2d\u8c03\u6574\u5b66\u4e60\u7387\u3002\u6b64\u5916\uff0c\u9700\u786e\u4fdd\u5b9e\u73b0\u5bf9\u68af\u5ea6\u7684\u6b63\u786e\u5904\u7406\uff0c\u4ee5\u907f\u514d\u66f4\u65b0\u8fc7\u7a0b\u4e2d\u51fa\u73b0\u7684\u6570\u503c\u4e0d\u7a33\u5b9a\u95ee\u9898\u3002<\/p>\n<p><strong>\u81ea\u5b9a\u4e49\u4f18\u5316\u5668\u7684\u6027\u80fd\u5982\u4f55\u8fdb\u884c\u8bc4\u4f30\uff1f<\/strong><br 
\/>\u8bc4\u4f30\u81ea\u5b9a\u4e49\u4f18\u5316\u5668\u7684\u6027\u80fd\u901a\u5e38\u901a\u8fc7\u5bf9\u6bd4\u5176\u5728\u7279\u5b9a\u4efb\u52a1\u4e0a\u7684\u8bad\u7ec3\u6548\u679c\u3002\u53ef\u4ee5\u4f7f\u7528\u5e38\u89c1\u7684\u6307\u6807\uff0c\u5982\u8bad\u7ec3\u635f\u5931\u3001\u9a8c\u8bc1\u635f\u5931\u548c\u51c6\u786e\u7387\u7b49\uff0c\u6765\u8861\u91cf\u4f18\u5316\u5668\u7684\u8868\u73b0\u3002\u540c\u65f6\uff0c\u8bb0\u5f55\u8bad\u7ec3\u8fc7\u7a0b\u4e2d\u6240\u9700\u7684\u65f6\u95f4\u548c\u8d44\u6e90\u6d88\u8017\u4e5f\u662f\u91cd\u8981\u7684\u8bc4\u4f30\u6807\u51c6\u3002\u901a\u8fc7\u5bf9\u6bd4\u4e0d\u540c\u4f18\u5316\u5668\u5728\u76f8\u540c\u6a21\u578b\u548c\u6570\u636e\u96c6\u4e0a\u7684\u8868\u73b0\uff0c\u53ef\u4ee5\u66f4\u76f4\u89c2\u5730\u4e86\u89e3\u81ea\u5b9a\u4e49\u4f18\u5316\u5668\u7684\u4f18\u52a3\u3002<\/p>\n","protected":false},"excerpt":{"rendered":"\u81ea\u5b9a\u4e49\u4f18\u5316\u5668\u5728Python\u4e2d\u53ef\u4ee5\u901a\u8fc7\u7ee7\u627f\u4f18\u5316\u5668\u57fa\u7c7b\u3001\u5b9a\u4e49\u81ea\u5b9a\u4e49\u7684\u4f18\u5316\u65b9\u6cd5\u3001\u8bbe\u7f6e\u53c2\u6570\u66f4\u65b0\u89c4\u5219\u6765\u5b9e\u73b0\u3002\u5177\u4f53\u5b9e\u73b0\u6b65\u9aa4 
[&hellip;]","protected":false},"author":3,"featured_media":1077559,"comment_status":"closed","ping_status":"","sticky":false,"template":"","format":"standard","meta":{"_acf_changed":false,"footnotes":""},"categories":[37],"tags":[],"acf":[],"_links":{"self":[{"href":"https:\/\/docs.pingcode.com\/wp-json\/wp\/v2\/posts\/1077555"}],"collection":[{"href":"https:\/\/docs.pingcode.com\/wp-json\/wp\/v2\/posts"}],"about":[{"href":"https:\/\/docs.pingcode.com\/wp-json\/wp\/v2\/types\/post"}],"author":[{"embeddable":true,"href":"https:\/\/docs.pingcode.com\/wp-json\/wp\/v2\/users\/3"}],"replies":[{"embeddable":true,"href":"https:\/\/docs.pingcode.com\/wp-json\/wp\/v2\/comments?post=1077555"}],"version-history":[{"count":"1","href":"https:\/\/docs.pingcode.com\/wp-json\/wp\/v2\/posts\/1077555\/revisions"}],"predecessor-version":[{"id":1077562,"href":"https:\/\/docs.pingcode.com\/wp-json\/wp\/v2\/posts\/1077555\/revisions\/1077562"}],"wp:featuredmedia":[{"embeddable":true,"href":"https:\/\/docs.pingcode.com\/wp-json\/wp\/v2\/media\/1077559"}],"wp:attachment":[{"href":"https:\/\/docs.pingcode.com\/wp-json\/wp\/v2\/media?parent=1077555"}],"wp:term":[{"taxonomy":"category","embeddable":true,"href":"https:\/\/docs.pingcode.com\/wp-json\/wp\/v2\/categories?post=1077555"},{"taxonomy":"post_tag","embeddable":true,"href":"https:\/\/docs.pingcode.com\/wp-json\/wp\/v2\/tags?post=1077555"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}