ibcore: Remove debug prints after allocation failure.

The prints after the [k|v][m|z|c]alloc() functions are not needed,
because in case of failure the allocators will print their own
internal error messages anyway.

Linux commit:
2716243212241855cd9070883779f6e58967dec5

MFC after:	1 week
Reviewed by:	kib
Sponsored by:	Mellanox Technologies // NVIDIA Networking
Hans Petter Selasky 2021-06-16 15:01:45 +02:00
parent 468a6b5055
commit 26646ba5bc

@@ -827,7 +827,6 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 	local = kmalloc(sizeof *local, GFP_ATOMIC);
 	if (!local) {
 		ret = -ENOMEM;
-		dev_err(&device->dev, "No memory for ib_mad_local_private\n");
 		goto out;
 	}
 	local->mad_priv = NULL;
@@ -835,7 +834,6 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 		mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
 		if (!mad_priv) {
 			ret = -ENOMEM;
-			dev_err(&device->dev, "No memory for local response MAD\n");
 			kfree(local);
 			goto out;
 		}
@@ -958,9 +956,6 @@ static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
 	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
 		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
 		if (!seg) {
-			dev_err(&send_buf->mad_agent->device->dev,
-				"alloc_send_rmpp_segs: RMPP mem alloc failed for len %zd, gfp %#x\n",
-				sizeof (*seg) + seg_size, gfp_mask);
 			free_send_rmpp_list(send_wr);
 			return -ENOMEM;
 		}
@@ -1373,12 +1368,7 @@ static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
 {
 	/* Allocate management method table */
 	*method = kzalloc(sizeof **method, GFP_ATOMIC);
-	if (!*method) {
-		pr_err("No memory for ib_mad_mgmt_method_table\n");
-		return -ENOMEM;
-	}
-
-	return 0;
+	return (*method) ? 0 : (-ENOMEM);
 }
 
 /*
@@ -1469,8 +1459,6 @@ static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
 		/* Allocate management class table for "new" class version */
 		*class = kzalloc(sizeof **class, GFP_ATOMIC);
 		if (!*class) {
-			dev_err(&agent_priv->agent.device->dev,
-				"No memory for ib_mad_mgmt_class_table\n");
 			ret = -ENOMEM;
 			goto error1;
 		}
@@ -1535,22 +1523,16 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
 	if (!*vendor_table) {
 		/* Allocate mgmt vendor class table for "new" class version */
 		vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
-		if (!vendor) {
-			dev_err(&agent_priv->agent.device->dev,
-				"No memory for ib_mad_mgmt_vendor_class_table\n");
+		if (!vendor)
 			goto error1;
-		}
 
 		*vendor_table = vendor;
 	}
 	if (!(*vendor_table)->vendor_class[vclass]) {
 		/* Allocate table for this management vendor class */
 		vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
-		if (!vendor_class) {
-			dev_err(&agent_priv->agent.device->dev,
-				"No memory for ib_mad_mgmt_vendor_class\n");
+		if (!vendor_class)
 			goto error2;
-		}
 
 		(*vendor_table)->vendor_class[vclass] = vendor_class;
 	}
@@ -1560,7 +1542,8 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
 			    mad_reg_req->oui, 3)) {
 			method = &(*vendor_table)->vendor_class[
 						vclass]->method_table[i];
-			BUG_ON(!*method);
+			if (!*method)
+				goto error3;
 			goto check_in_use;
 		}
 	}
@@ -1570,7 +1553,6 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
 				vclass]->oui[i])) {
 			method = &(*vendor_table)->vendor_class[
 				vclass]->method_table[i];
-			BUG_ON(*method);
 			/* Allocate method table for this OUI */
 			if ((ret = allocate_method_table(method)))
 				goto error3;
@@ -2249,11 +2231,8 @@ static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 
 	mad_size = recv->mad_size;
 	response = alloc_mad_private(mad_size, GFP_KERNEL);
-	if (!response) {
-		dev_err(&port_priv->device->dev,
-			"%s: no memory for response buffer\n", __func__);
+	if (!response)
 		goto out;
-	}
 
 	if (rdma_cap_ib_switch(port_priv->device))
 		port_num = wc->port_num;
@@ -2880,8 +2859,6 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
 		mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
 					     GFP_ATOMIC);
 		if (!mad_priv) {
-			dev_err(&qp_info->port_priv->device->dev,
-				"No memory for receive buffer\n");
 			ret = -ENOMEM;
 			break;
 		}
@@ -2972,11 +2949,8 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
 	u16 pkey_index;
 
 	attr = kmalloc(sizeof *attr, GFP_KERNEL);
-	if (!attr) {
-		dev_err(&port_priv->device->dev,
-			"Couldn't kmalloc ib_qp_attr\n");
+	if (!attr)
 		return -ENOMEM;
-	}
 
 	ret = ib_find_pkey(port_priv->device, port_priv->port_num,
 			   IB_DEFAULT_PKEY_FULL, &pkey_index);
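
For reference, the error-handling pattern the diff converts to looks roughly
like the sketch below. This sketch is not part of the commit; the struct and
function names are made up for illustration. The point is that kmalloc() and
kzalloc() already emit an allocation-failure warning from the allocator
itself (unless the caller passes __GFP_NOWARN), so the caller only needs to
unwind and return -ENOMEM.

#include <linux/slab.h>
#include <linux/errno.h>

/* Hypothetical context structure, for illustration only. */
struct example_ctx {
	int dummy;
};

static int example_alloc_ctx(struct example_ctx **out)
{
	struct example_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;	/* no extra print; the allocator already warned */

	*out = ctx;
	return 0;
}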