Backout 356693. The libsa malloc does provide the necessary alignment, and
memalign by 4 will reduce alignment on some platforms. Thanks to Ian for
pointing this out.
Toomas Soome 2020-01-13 20:02:27 +00:00
parent 5248d3b1b2
commit aaeffe5b70
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=356700
4 changed files with 4 additions and 4 deletions
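
For context (not part of this commit), every hunk below follows the same receive-buffer pattern: allocate the frame plus ETHER_ALIGN bytes, then receive into a pointer ETHER_ALIGN (2) bytes into the buffer so the IP header behind the 14-byte Ethernet header lands on a 4-byte boundary. A minimal standalone sketch of that pattern, assuming a hypothetical fake_receive() helper and locally defined constants, could look like this:

#include <sys/types.h>
#include <stdlib.h>

#define ETHER_HDR_LEN	14	/* destination MAC + source MAC + ethertype */
#define ETHER_CRC_LEN	4	/* trailing frame checksum */
#define ETHER_ALIGN	2	/* 2 + 14 = 16, so the IP header is 4-byte aligned */

/* Hypothetical helper: copy one received frame into dst, return its length. */
extern ssize_t fake_receive(void *dst, size_t maxlen);

static ssize_t
rx_alloc_and_copy(void **pkt, size_t maxframe)
{
	char *buf, *ptr;
	ssize_t len;

	/*
	 * libsa's malloc() already returns storage with the strongest
	 * alignment the platform requires, so requesting only 4-byte
	 * alignment via memalign(4, ...) gains nothing and can weaken
	 * the guarantee on platforms whose default is 8 or 16 bytes.
	 */
	buf = malloc(maxframe + ETHER_ALIGN);
	if (buf == NULL)
		return (-1);

	/* Receive into buf + ETHER_ALIGN so the IP header stays aligned. */
	ptr = buf + ETHER_ALIGN;
	len = fake_receive(ptr, maxframe);
	if (len <= 0) {
		free(buf);
		return (-1);
	}

	/* Hand back the original allocation; the frame starts at *pkt + ETHER_ALIGN. */
	*pkt = buf;
	return (len);
}

In this sketch the caller reads the frame at (char *)*pkt + ETHER_ALIGN and later frees *pkt, which keeps free() paired with the pointer malloc() returned. The four hunks below are instances of the same one-line change.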

@@ -178,7 +178,7 @@ efinet_get(struct iodesc *desc, void **pkt, time_t timeout)
return (ret);
bufsz = net->Mode->MaxPacketSize + ETHER_HDR_LEN + ETHER_CRC_LEN;
-	buf = memalign(4, bufsz + ETHER_ALIGN);
+	buf = malloc(bufsz + ETHER_ALIGN);
if (buf == NULL)
return (ret);
ptr = buf + ETHER_ALIGN;

@@ -484,7 +484,7 @@ pxe_netif_receive(void **pkt)
}
size = isr->FrameLength;
-	buf = memalign(4, size + ETHER_ALIGN);
+	buf = malloc(size + ETHER_ALIGN);
if (buf == NULL) {
bio_free(isr, sizeof(*isr));
return (-1);

@@ -142,7 +142,7 @@ ofwn_get(struct iodesc *desc, void **pkt, time_t timeout)
* a small shortcut here.
*/
len = ETHER_MAX_LEN;
-	buf = memalign(4, len + ETHER_ALIGN);
+	buf = malloc(len + ETHER_ALIGN);
if (buf == NULL)
return (-1);
ptr = buf + ETHER_ALIGN;

@@ -302,7 +302,7 @@ net_get(struct iodesc *desc, void **pkt, time_t timeout)
#endif
if (rlen > 0) {
-		buf = memalign(4, rlen + ETHER_ALIGN);
+		buf = malloc(rlen + ETHER_ALIGN);
if (buf == NULL)
return (-1);
memcpy(buf + ETHER_ALIGN, sc->sc_rxbuf, rlen);