
Stochastic gradient descent implementation with Python's numpy

I have to implement stochastic gradient descent using Python's numpy library. For this, I was given the following function definitions:

def compute_stoch_gradient(y, tx, w):
    """Compute a stochastic gradient for batch data."""


def stochastic_gradient_descent(
        y, tx, initial_w, batch_size, max_epochs, gamma):
    """Stochastic gradient descent algorithm."""


I was also given the following helper function:


def batch_iter(y, tx, batch_size, num_batches=1, shuffle=True):
    """
    Generate a minibatch iterator for a dataset.
    Takes as input two iterables (here the output desired values 'y' and the input data 'tx')
    Outputs an iterator which gives mini-batches of `batch_size` matching elements from `y` and `tx`.
    Data can be randomly shuffled to avoid ordering in the original data messing with the randomness of the minibatches.
    Example of use :
    for minibatch_y, minibatch_tx in batch_iter(y, tx, 32):
        <DO-SOMETHING>
    """
    data_size = len(y)

    if shuffle:
        shuffle_indices = np.random.permutation(np.arange(data_size))
        shuffled_y = y[shuffle_indices]
        shuffled_tx = tx[shuffle_indices]
    else:
        shuffled_y = y
        shuffled_tx = tx
    for batch_num in range(num_batches):
        start_index = batch_num * batch_size
        end_index = min((batch_num + 1) * batch_size, data_size)
        if start_index != end_index:
            yield shuffled_y[start_index:end_index], shuffled_tx[start_index:end_index]


I implemented the two functions as follows:


def compute_stoch_gradient(y, tx, w):
    """Compute a stochastic gradient for batch data."""
    e = y - tx.dot(w)
    return (-1 / y.shape[0]) * tx.transpose().dot(e)


def stochastic_gradient_descent(y, tx, initial_w, batch_size, max_epochs, gamma):
    """Stochastic gradient descent algorithm."""
    ws = [initial_w]
    losses = []
    w = initial_w
    for n_iter in range(max_epochs):
        for minibatch_y, minibatch_x in batch_iter(y, tx, batch_size):
            w = ws[n_iter] - gamma * compute_stoch_gradient(minibatch_y, minibatch_x, ws[n_iter])
            ws.append(np.copy(w))
            loss = y - tx.dot(w)
            losses.append(loss)

    return losses, ws

I am not sure whether the iteration should run over range(max_epochs) or over a larger range. I say this because I have read that an epoch is "each time we run through the entire dataset", so I think one epoch consists of more than one iteration...


In a typical implementation, mini-batch gradient descent with batch size B should pick B data points from the dataset at random and update the weights based on the gradient computed over that subset. This process itself is repeated many times, until convergence or until some maximum number of iterations is reached. Mini-batch with B=1 is SGD, which can sometimes be noisy.
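As a rough illustration of that loop, here is a minimal sketch in numpy. It is not the course's expected solution: the function name minibatch_sgd, the max_iters parameter, and the use of the MSE gradient are my own assumptions for this example.

import numpy as np

def minibatch_sgd(y, tx, initial_w, batch_size, max_iters, gamma):
    """Sketch: each iteration samples B points at random and takes one gradient step."""
    w = initial_w
    n = len(y)
    for n_iter in range(max_iters):
        # pick a random subset of B indices (illustrative choice: without replacement)
        idx = np.random.choice(n, size=batch_size, replace=False)
        mb_y, mb_tx = y[idx], tx[idx]
        # gradient of the MSE loss on this mini-batch
        e = mb_y - mb_tx.dot(w)
        grad = -mb_tx.T.dot(e) / batch_size
        w = w - gamma * grad
    return w

With batch_size=1 this reduces to plain SGD. Note that one full pass over the data (one epoch) corresponds to roughly len(y) / batch_size such updates, whereas your batch_iter with the default num_batches=1 yields only one mini-batch per call, i.e. one update per outer iteration of range(max_epochs).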

Apart from the above comments, you may also want to experiment with the batch size and the learning rate (step size), since they have a significant impact on the convergence rate of stochastic and mini-batch gradient descent.
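For example, a small sweep over both parameters using your stochastic_gradient_descent function could look like the sketch below; the grid values and the final-MSE summary are arbitrary illustrations (and it assumes y, tx, initial_w and max_epochs are already defined), not recommended settings.

import numpy as np

# Hypothetical grid of batch sizes and step sizes; adjust to your data.
for batch_size in [1, 8, 32]:
    for gamma in [0.1, 0.01, 0.001]:
        losses, ws = stochastic_gradient_descent(
            y, tx, initial_w, batch_size, max_epochs, gamma)
        final_mse = np.mean((y - tx.dot(ws[-1])) ** 2) / 2
        print(f"B={batch_size}, gamma={gamma}: final MSE={final_mse:.4f}")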

The figures below show the impact of these two parameters on the convergence rate of SGD logistic regression for sentiment analysis on the Amazon product reviews dataset, from an assignment in the University of Washington's Machine Learning: Classification course:

[Figures: convergence of SGD logistic regression for different batch sizes and learning rates]

For more details on this, you can refer to https://sandipanweb.wordpress.com/2017/03/31/online-learning-sentiment-analysis-with-logistic-regression-via-stochastic-gradient-ascent/

