Index: sys/dev/ic/rtl8169.c
===================================================================
RCS file: /cvsroot/src/sys/dev/ic/rtl8169.c,v
retrieving revision 1.179
diff -p -u -r1.179 rtl8169.c
--- sys/dev/ic/rtl8169.c	12 Aug 2024 21:27:34 -0000	1.179
+++ sys/dev/ic/rtl8169.c	27 Nov 2025 11:29:36 -0000
@@ -145,6 +145,10 @@ __KERNEL_RCSID(0, "$NetBSD: rtl8169.c,v
 
 #include 
 
+#define RE_LOCK(sc)		mutex_enter(&(sc)->sc_lock)
+#define RE_UNLOCK(sc)		mutex_exit(&(sc)->sc_lock)
+#define RE_ASSERT_LOCKED(sc)	KASSERT(mutex_owned(&(sc)->sc_lock))
+
 static inline void re_set_bufaddr(struct re_desc *, bus_addr_t);
 
 static int re_newbuf(struct rtk_softc *, int, struct mbuf *);
@@ -153,10 +157,15 @@ static int re_tx_list_init(struct rtk_so
 static void re_rxeof(struct rtk_softc *);
 static void re_txeof(struct rtk_softc *);
 static void re_tick(void *);
+static void re_tick_locked(struct rtk_softc *);
 static void re_start(struct ifnet *);
+static void re_start_locked(struct rtk_softc *);
 static int  re_ioctl(struct ifnet *, u_long, void *);
+static int  re_ifflags_cb(struct ethercom *);
 static int  re_init(struct ifnet *);
+static int  re_init_locked(struct rtk_softc *);
 static void re_stop(struct ifnet *, int);
+static void re_stop_locked(struct rtk_softc *, int);
 static void re_watchdog(struct ifnet *);
 
 static int re_enable(struct rtk_softc *);
@@ -295,19 +304,15 @@ re_miibus_readreg(device_t dev, int phy,
 {
 	struct rtk_softc *sc = device_private(dev);
 	uint16_t re8139_reg = 0;
-	int s, rv = 0;
-
-	s = splnet();
+	int rv = 0;
 
 	if ((sc->sc_quirk & RTKQ_8139CPLUS) == 0) {
 		rv = re_gmii_readreg(dev, phy, reg, val);
-		splx(s);
 		return rv;
 	}
 
 	/* Pretend the internal PHY is only at address 0 */
 	if (phy) {
-		splx(s);
 		return -1;
 	}
 	switch (reg) {
@@ -329,7 +334,6 @@ re_miibus_readreg(device_t dev, int phy,
 	case MII_PHYIDR1:
 	case MII_PHYIDR2:
 		*val = 0;
-		splx(s);
 		return 0;
 	/*
 	 * Allow the rlphy driver to read the media status
@@ -339,11 +343,9 @@ re_miibus_readreg(device_t dev, int phy,
 	 */
 	case RTK_MEDIASTAT:
 		*val = CSR_READ_1(sc, RTK_MEDIASTAT);
-		splx(s);
 		return 0;
 	default:
 		printf("%s: bad phy register\n", device_xname(sc->sc_dev));
-		splx(s);
 		return -1;
 	}
 	*val = CSR_READ_2(sc, re8139_reg);
@@ -351,7 +353,6 @@ re_miibus_readreg(device_t dev, int phy,
 		/* 8139C+ has different bit layout. */
 		*val &= ~(BMCR_LOOP | BMCR_ISO);
 	}
-	splx(s);
 	return 0;
 }
 
@@ -360,19 +361,15 @@ re_miibus_writereg(device_t dev, int phy
 {
 	struct rtk_softc *sc = device_private(dev);
 	uint16_t re8139_reg = 0;
-	int s, rv;
-
-	s = splnet();
+	int rv;
 
 	if ((sc->sc_quirk & RTKQ_8139CPLUS) == 0) {
 		rv = re_gmii_writereg(dev, phy, reg, val);
-		splx(s);
 		return rv;
 	}
 
 	/* Pretend the internal PHY is only at address 0 */
 	if (phy) {
-		splx(s);
 		return -1;
 	}
 	switch (reg) {
@@ -397,16 +394,13 @@ re_miibus_writereg(device_t dev, int phy
 		break;
 	case MII_PHYIDR1:
 	case MII_PHYIDR2:
-		splx(s);
 		return 0;
 		break;
 	default:
 		printf("%s: bad phy register\n", device_xname(sc->sc_dev));
-		splx(s);
 		return -1;
 	}
 	CSR_WRITE_2(sc, re8139_reg, val);
-	splx(s);
 	return 0;
 }
 
@@ -473,15 +467,19 @@ re_diag(struct rtk_softc *sc)
 	bus_dmamap_t dmamap;
 	uint16_t status;
 	uint32_t rxstat;
-	int total_len, i, s, error = 0;
+	int total_len, i, error = 0;
 	static const uint8_t dst[] = { 0x00, 'h', 'e', 'l', 'l', 'o' };
 	static const uint8_t src[] = { 0x00, 'w', 'o', 'r', 'l', 'd' };
 
+	RE_LOCK(sc);
+
 	/* Allocate a single mbuf */
 
 	MGETHDR(m0, M_DONTWAIT, MT_DATA);
-	if (m0 == NULL)
+	if (m0 == NULL) {
+		RE_UNLOCK(sc);
 		return ENOBUFS;
+	}
 
 	/*
	 * Initialize the NIC in test mode.  This sets the chip up
@@ -494,10 +492,10 @@ re_diag(struct rtk_softc *sc)
 	ifp->if_flags |= IFF_PROMISC;
 	sc->re_testmode = 1;
-	re_init(ifp);
-	re_stop(ifp, 0);
+	re_init_locked(sc);
+	re_stop_locked(sc, 0);
 	DELAY(100000);
-	re_init(ifp);
+	re_init_locked(sc);
 
 	/* Put some data in the mbuf */
 
@@ -512,10 +510,8 @@ re_diag(struct rtk_softc *sc)
 	 */
 	CSR_WRITE_2(sc, RTK_ISR, 0xFFFF);
 
-	s = splnet();
 	IF_ENQUEUE(&ifp->if_snd, m0);
-	re_start(ifp);
-	splx(s);
+	re_start_locked(sc);
 	m0 = NULL;
 
 	/* Wait for it to propagate through the chip */
@@ -587,9 +583,11 @@ re_diag(struct rtk_softc *sc)
 	sc->re_testmode = 0;
 	ifp->if_flags &= ~IFF_PROMISC;
-	re_stop(ifp, 0);
+	re_stop_locked(sc, 0);
 	m_freem(m0);
 
+	RE_UNLOCK(sc);
+
 	return error;
 }
 
@@ -608,6 +606,8 @@ re_attach(struct rtk_softc *sc)
 	const struct re_revision *rr;
 	const char *re_name = NULL;
 
+	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NET);
+
 	if ((sc->sc_quirk & RTKQ_8139CPLUS) == 0) {
 		/* Revision of 8169/8169S/8110s in bits 30..26, 23 */
 		sc->sc_hwrev = CSR_READ_4(sc, RTK_TXCFG) & RTK_TXCFG_HWREV;
@@ -682,8 +682,8 @@ re_attach(struct rtk_softc *sc)
 	case RTK_HWREV_8168H:
 	case RTK_HWREV_8411B:
 		sc->sc_quirk |= RTKQ_DESCV2 | RTKQ_NOEECMD |
-		    RTKQ_MACSTAT | RTKQ_CMDSTOP | RTKQ_NOJUMBO |
-		    RTKQ_RXDV_GATED | RTKQ_TXRXEN_LATER;
+		    RTKQ_MACSTAT | RTKQ_CMDSTOP | RTKQ_PHYWAKE_PM |
+		    RTKQ_NOJUMBO | RTKQ_RXDV_GATED | RTKQ_TXRXEN_LATER;
 		break;
 	case RTK_HWREV_8100E:
 	case RTK_HWREV_8100E_SPIN2:
@@ -722,12 +722,14 @@ re_attach(struct rtk_softc *sc)
 		/* Set RX length mask */
 		sc->re_rxlenmask = RE_RDESC_STAT_GFRAGLEN;
 		sc->re_ldata.re_tx_desc_cnt = RE_TX_DESC_CNT_8169;
+		sc->re_ldata.re_rx_desc_cnt = RE_RX_DESC_CNT_8169;
 	} else {
 		sc->sc_quirk |= RTKQ_NOJUMBO;
 
 		/* Set RX length mask */
 		sc->re_rxlenmask = RE_RDESC_STAT_FRAGLEN;
 		sc->re_ldata.re_tx_desc_cnt = RE_TX_DESC_CNT_8139;
+		sc->re_ldata.re_rx_desc_cnt = RE_RX_DESC_CNT_8139;
 	}
 
 	/* Reset the adapter. */
@@ -781,11 +783,13 @@ re_attach(struct rtk_softc *sc)
 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
 	    ether_sprintf(eaddr));
 
+#if 0
 	if (sc->re_ldata.re_tx_desc_cnt >
 	    PAGE_SIZE / sizeof(struct re_desc)) {
 		sc->re_ldata.re_tx_desc_cnt =
 		    PAGE_SIZE / sizeof(struct re_desc);
 	}
+#endif
 
 	aprint_verbose_dev(sc->sc_dev, "using %d tx descriptors\n",
 	    sc->re_ldata.re_tx_desc_cnt);
@@ -829,7 +833,7 @@ re_attach(struct rtk_softc *sc)
 	}
 
 	/* Create DMA maps for TX buffers */
-	for (i = 0; i < RE_TX_QLEN; i++) {
+	for (i = 0; i < RE_TX_QLEN(sc); i++) {
 		error = bus_dmamap_create(sc->sc_dmat,
 		    round_page(IP_MAXPACKET),
 		    RE_TX_DESC_CNT(sc), RE_TDESC_CMD_FRAGLEN,
@@ -879,7 +883,7 @@ re_attach(struct rtk_softc *sc)
 	}
 
 	/* Create DMA maps for RX buffers */
-	for (i = 0; i < RE_RX_DESC_CNT; i++) {
+	for (i = 0; i < RE_RX_DESC_CNT(sc); i++) {
 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
 		    0, 0, &sc->re_ldata.re_rxsoft[i].rxs_dmamap);
 		if (error) {
@@ -899,12 +903,15 @@ re_attach(struct rtk_softc *sc)
 	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
 	ifp->if_mtu = ETHERMTU;
 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+	ifp->if_extflags = IFEF_MPSAFE;
 	ifp->if_ioctl = re_ioctl;
 	sc->ethercom.ec_capabilities |=
 	    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
 	ifp->if_start = re_start;
 	ifp->if_stop = re_stop;
 
+	sc->sc_if_flags = ifp->if_flags;
+
 	/*
 	 * IFCAP_CSUM_IPv4_Tx on re(4) is broken for small packets,
 	 * so we have a workaround to handle the bug by padding
@@ -918,11 +925,11 @@ re_attach(struct rtk_softc *sc)
 	ifp->if_watchdog = re_watchdog;
 	ifp->if_init = re_init;
-	ifp->if_snd.ifq_maxlen = RE_IFQ_MAXLEN;
 	ifp->if_capenable = ifp->if_capabilities;
+	IFQ_SET_MAXLEN(&ifp->if_snd, RE_IFQ_MAXLEN);
 	IFQ_SET_READY(&ifp->if_snd);
 
-	callout_init(&sc->rtk_tick_ch, 0);
+	callout_init(&sc->rtk_tick_ch, CALLOUT_MPSAFE);
 	callout_setfunc(&sc->rtk_tick_ch, re_tick, sc);
 
 	/* Do MII setup */
@@ -943,6 +950,7 @@ re_attach(struct rtk_softc *sc)
 	if_attach(ifp);
 	if_deferred_start_init(ifp, NULL);
 	ether_ifattach(ifp, eaddr);
+	ether_set_ifflags_cb(&sc->ethercom, re_ifflags_cb);
 
 	rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
 	    RND_TYPE_NET, RND_FLAG_DEFAULT);
@@ -957,7 +965,7 @@ re_attach(struct rtk_softc *sc)
 
  fail_8:
 	/* Destroy DMA maps for RX buffers. */
-	for (i = 0; i < RE_RX_DESC_CNT; i++)
+	for (i = 0; i < RE_RX_DESC_CNT(sc); i++)
 		if (sc->re_ldata.re_rxsoft[i].rxs_dmamap != NULL)
 			bus_dmamap_destroy(sc->sc_dmat,
 			    sc->re_ldata.re_rxsoft[i].rxs_dmamap);
@@ -975,7 +983,7 @@ re_attach(struct rtk_softc *sc)
 
  fail_4:
 	/* Destroy DMA maps for TX buffers. */
-	for (i = 0; i < RE_TX_QLEN; i++)
+	for (i = 0; i < RE_TX_QLEN(sc); i++)
 		if (sc->re_ldata.re_txq[i].txq_dmamap != NULL)
 			bus_dmamap_destroy(sc->sc_dmat,
 			    sc->re_ldata.re_txq[i].txq_dmamap);
@@ -991,6 +999,8 @@ re_attach(struct rtk_softc *sc)
 	bus_dmamem_free(sc->sc_dmat,
 	    &sc->re_ldata.re_tx_listseg, sc->re_ldata.re_tx_listnseg);
  fail_0:
+	mutex_destroy(&sc->sc_lock);
+
 	return;
 }
 
@@ -1029,8 +1039,7 @@ re_detach(struct rtk_softc *sc)
 	if ((sc->sc_flags & RTK_ATTACHED) == 0)
 		return 0;
 
-	/* Unhook our tick handler. */
-	callout_stop(&sc->rtk_tick_ch);
+	re_stop(ifp, 1);
 
 	/* Detach all PHYs. */
 	mii_detach(&sc->mii, MII_PHY_ANY, MII_OFFSET_ANY);
@@ -1043,7 +1052,7 @@ re_detach(struct rtk_softc *sc)
 	ifmedia_fini(&sc->mii.mii_media);
 
 	/* Destroy DMA maps for RX buffers. */
-	for (i = 0; i < RE_RX_DESC_CNT; i++)
+	for (i = 0; i < RE_RX_DESC_CNT(sc); i++)
 		if (sc->re_ldata.re_rxsoft[i].rxs_dmamap != NULL)
 			bus_dmamap_destroy(sc->sc_dmat,
 			    sc->re_ldata.re_rxsoft[i].rxs_dmamap);
@@ -1057,7 +1066,7 @@ re_detach(struct rtk_softc *sc)
 	    &sc->re_ldata.re_rx_listseg, sc->re_ldata.re_rx_listnseg);
 
 	/* Destroy DMA maps for TX buffers. */
-	for (i = 0; i < RE_TX_QLEN; i++)
+	for (i = 0; i < RE_TX_QLEN(sc); i++)
 		if (sc->re_ldata.re_txq[i].txq_dmamap != NULL)
 			bus_dmamap_destroy(sc->sc_dmat,
 			    sc->re_ldata.re_txq[i].txq_dmamap);
@@ -1072,6 +1081,8 @@ re_detach(struct rtk_softc *sc)
 
 	pmf_device_deregister(sc->sc_dev);
 
+	mutex_destroy(&sc->sc_lock);
+
 	/* we don't want to run again */
 	sc->sc_flags &= ~RTK_ATTACHED;
 
@@ -1170,7 +1181,7 @@ re_newbuf(struct rtk_softc *sc, int idx,
 	d->re_vlanctl = 0;
 	cmdstat = map->dm_segs[0].ds_len;
-	if (idx == (RE_RX_DESC_CNT - 1))
+	if (idx == (RE_RX_DESC_CNT(sc) - 1))
 		cmdstat |= RE_RDESC_CMD_EOR;
 	re_set_bufaddr(d, map->dm_segs[0].ds_addr);
 	d->re_cmdstat = htole32(cmdstat);
@@ -1191,7 +1202,7 @@ re_tx_list_init(struct rtk_softc *sc)
 	int i;
 
 	memset(sc->re_ldata.re_tx_list, 0, RE_TX_LIST_SZ(sc));
-	for (i = 0; i < RE_TX_QLEN; i++) {
+	for (i = 0; i < RE_TX_QLEN(sc); i++) {
 		sc->re_ldata.re_txq[i].txq_mbuf = NULL;
 	}
 
@@ -1201,7 +1212,7 @@ re_tx_list_init(struct rtk_softc *sc)
 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
 	sc->re_ldata.re_txq_prodidx = 0;
 	sc->re_ldata.re_txq_considx = 0;
-	sc->re_ldata.re_txq_free = RE_TX_QLEN;
+	sc->re_ldata.re_txq_free = RE_TX_QLEN(sc);
 	sc->re_ldata.re_tx_free = RE_TX_DESC_CNT(sc);
 	sc->re_ldata.re_tx_nextfree = 0;
 
@@ -1215,7 +1226,7 @@ re_rx_list_init(struct rtk_softc *sc)
 
 	memset(sc->re_ldata.re_rx_list, 0, RE_RX_LIST_SZ);
 
-	for (i = 0; i < RE_RX_DESC_CNT; i++) {
+	for (i = 0; i < RE_RX_DESC_CNT(sc); i++) {
 		if (re_newbuf(sc, i, NULL) == ENOBUFS)
 			return ENOBUFS;
 	}
@@ -1241,6 +1252,8 @@ re_rxeof(struct rtk_softc *sc)
 	struct re_rxsoft *rxs;
 	uint32_t rxstat, rxvlan;
 
+	RE_ASSERT_LOCKED(sc);
+
 	ifp = &sc->ethercom.ec_if;
 
 	for (i = sc->re_ldata.re_rx_prodidx;; i = RE_NEXT_RX_DESC(sc, i)) {
@@ -1265,6 +1278,21 @@ re_rxeof(struct rtk_softc *sc)
 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
 
 		if ((rxstat & RE_RDESC_STAT_EOF) == 0) {
+			if (re_newbuf(sc, i, NULL) != 0) {
+				/*
+				 * If this is part of a multi-fragment packet,
+				 * discard all the pieces
+				 */
+				if_statinc(ifp, if_ierrors);
+				if (sc->re_head != NULL) {
+					m_freem(sc->re_head);
+					sc->re_head = sc->re_tail = NULL;
+				}
+				re_newbuf(sc, i, m);
+
+				/* XXX needs to drop remaining frags */
+				continue;
+			}
 			m->m_len = MCLBYTES - RE_ETHER_ALIGN;
 			if (sc->re_head == NULL)
 				sc->re_head = sc->re_tail = m;
@@ -1273,7 +1301,6 @@ re_rxeof(struct rtk_softc *sc)
 				sc->re_tail->m_next = m;
 				sc->re_tail = m;
 			}
-			re_newbuf(sc, i, NULL);
 			continue;
 		}
 
@@ -1441,11 +1468,14 @@ re_txeof(struct rtk_softc *sc)
 	struct re_txq *txq;
 	uint32_t txstat;
 	int idx, descidx;
+	unsigned count = 0;
+
+	RE_ASSERT_LOCKED(sc);
 
 	ifp = &sc->ethercom.ec_if;
 
 	for (idx = sc->re_ldata.re_txq_considx;
-	    sc->re_ldata.re_txq_free < RE_TX_QLEN;
+	    sc->re_ldata.re_txq_free < RE_TX_QLEN(sc);
 	    idx = RE_NEXT_TXQ(sc, idx), sc->re_ldata.re_txq_free++) {
 		txq = &sc->re_ldata.re_txq[idx];
 		KASSERT(txq->txq_mbuf != NULL);
@@ -1460,7 +1490,7 @@ re_txeof(struct rtk_softc *sc)
 		if (txstat & RE_TDESC_CMD_OWN) {
 			break;
 		}
-		
+
 		sc->re_ldata.re_tx_free += txq->txq_nsegs;
 		KASSERT(sc->re_ldata.re_tx_free <= RE_TX_DESC_CNT(sc));
 		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap,
@@ -1468,6 +1498,7 @@ re_txeof(struct rtk_softc *sc)
 		    0, txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
 		bus_dmamap_unload(sc->sc_dmat,
 		    txq->txq_dmamap);
 		m_freem(txq->txq_mbuf);
 		txq->txq_mbuf = NULL;
+		txq->txq_nsegs = -42;
 
 		net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
 		if (txstat & (RE_TDESC_STAT_EXCESSCOL | RE_TDESC_STAT_COLCNT))
@@ -1477,12 +1508,19 @@ re_txeof(struct rtk_softc *sc)
 			if_statinc_ref(ifp, nsr, if_collisions);
 			if_statinc_ref(ifp, nsr, if_oerrors);
 		else
 			if_statinc_ref(ifp, nsr, if_opackets);
 		IF_STAT_PUTREF(ifp);
+
+		++count;
 	}
 
 	sc->re_ldata.re_txq_considx = idx;
 
-	if (sc->re_ldata.re_txq_free > RE_NTXDESC_RSVD)
+	if ((sc->sc_if_flags & IFF_OACTIVE) != 0 &&
+	    sc->re_ldata.re_txq_free > 0 &&
+	    sc->re_ldata.re_tx_free >= RE_NTXDESC_RSVD) {
+//printf("%s: txeof unbusy q:%d/%d d:%d/%d\n", device_xname(sc->sc_dev), sc->re_ldata.re_txq_free, RE_TX_QLEN(sc), sc->re_ldata.re_tx_free, RE_TX_DESC_CNT(sc));
 		ifp->if_flags &= ~IFF_OACTIVE;
+		sc->sc_if_flags = ifp->if_flags;
+	}
 
 	/*
 	 * If not all descriptors have been released reaped yet,
@@ -1490,21 +1528,9 @@ re_txeof(struct rtk_softc *sc)
 	 * interrupt that will cause us to re-enter this routine.
 	 * This is done in case the transmitter has gone idle.
 	 */
-	if (sc->re_ldata.re_txq_free < RE_TX_QLEN) {
+	if (sc->re_ldata.re_txq_free < RE_TX_QLEN(sc)) {
 		if ((sc->sc_quirk & RTKQ_IM_HW) == 0)
 			CSR_WRITE_4(sc, RTK_TIMERCNT, 1);
-		if ((sc->sc_quirk & RTKQ_PCIE) != 0) {
-			/*
-			 * Some chips will ignore a second TX request
-			 * issued while an existing transmission is in
-			 * progress. If the transmitter goes idle but
-			 * there are still packets waiting to be sent,
-			 * we need to restart the channel here to flush
-			 * them out. This only seems to be required with
-			 * the PCIe devices.
-			 */
-			CSR_WRITE_1(sc, RTK_GTXSTART, RTK_TXSTART_START);
-		}
 	} else
 		ifp->if_timer = 0;
 }
@@ -1513,17 +1539,22 @@ static void
 re_tick(void *arg)
 {
 	struct rtk_softc *sc = arg;
-	int s;
 
-	/* XXX: just return for 8169S/8110S with rev 2 or newer phy */
-	s = splnet();
+	RE_LOCK(sc);
+	re_tick_locked(sc);
+	RE_UNLOCK(sc);
+}
 
+static void
+re_tick_locked(struct rtk_softc *sc)
+{
+	/* XXX: just return for 8169S/8110S with rev 2 or newer phy */
 	mii_tick(&sc->mii);
-	splx(s);
 
 	callout_schedule(&sc->rtk_tick_ch, hz);
 }
 
+
 int
 re_intr(void *arg)
 {
@@ -1543,6 +1574,8 @@ re_intr(void *arg)
 	const uint16_t status_mask = (sc->sc_quirk & RTKQ_IM_HW) ?
 	    RTK_INTRS_IM_HW : RTK_INTRS_CPLUS;
 
+	RE_LOCK(sc);
+
 	for (;;) {
 
 		status = CSR_READ_2(sc, RTK_ISR);
@@ -1561,17 +1594,32 @@ re_intr(void *arg)
 		if (status & (RTK_ISR_RX_OK | RTK_ISR_RX_ERR))
 			re_rxeof(sc);
 
+		/*
+		 * Some chips will ignore a second TX request issued
+		 * while an existing transmission is in progress. If
+		 * the transmitter goes idle but there are still
+		 * packets waiting to be sent, we need to restart the
+		 * channel here to flush them out. This only seems to
+		 * be required with the PCIe devices.
+		 */
+		if ((status & (RTK_ISR_TX_OK | RTK_ISR_TX_DESC_UNAVAIL)) &&
+		    (sc->sc_quirk & RTKQ_PCIE) != 0) {
+			if ((sc->sc_quirk & RTKQ_8139CPLUS) != 0)
+				CSR_WRITE_1(sc, RTK_TXSTART, RTK_TXSTART_START);
+			else
+				CSR_WRITE_1(sc, RTK_GTXSTART, RTK_TXSTART_START);
+		}
+
 		if (status & (RTK_ISR_TIMEOUT_EXPIRED | RTK_ISR_TX_ERR |
 		    RTK_ISR_TX_DESC_UNAVAIL | RTK_ISR_TX_OK))
 			re_txeof(sc);
 
-		if (status & RTK_ISR_SYSTEM_ERR) {
-			re_init(ifp);
-		}
+		if (status & RTK_ISR_SYSTEM_ERR)
+			re_init_locked(sc);
 
 		if (status & RTK_ISR_LINKCHG) {
 			callout_stop(&sc->rtk_tick_ch);
-			re_tick(sc);
+			re_tick_locked(sc);
 		}
 	}
 
@@ -1580,6 +1628,8 @@ re_intr(void *arg)
 
 	rnd_add_uint32(&sc->rnd_source, rndstatus);
 
+	RE_UNLOCK(sc);
+
 	return handled;
 }
 
@@ -1588,11 +1638,20 @@ re_intr(void *arg)
 /*
  * Main transmit routine for C+ and gigE NICs.
  */
-
 static void
 re_start(struct ifnet *ifp)
 {
-	struct rtk_softc *sc;
+	struct rtk_softc *sc = ifp->if_softc;
+
+	RE_LOCK(sc);
+	re_start_locked(sc);
+	RE_UNLOCK(sc);
+}
+
+static void
+re_start_locked(struct rtk_softc *sc)
+{
+	struct ifnet *ifp = &sc->ethercom.ec_if;
 	struct mbuf *m;
 	bus_dmamap_t map;
 	struct re_txq *txq;
@@ -1602,22 +1661,24 @@ re_start(struct ifnet *ifp)
 	int startdesc, curdesc, lastdesc;
 	bool pad;
 
-	sc = ifp->if_softc;
+	RE_ASSERT_LOCKED(sc);
+
 	ofree = sc->re_ldata.re_txq_free;
 
 	for (idx = sc->re_ldata.re_txq_prodidx;; idx = RE_NEXT_TXQ(sc, idx)) {
-		IFQ_POLL(&ifp->if_snd, m);
-		if (m == NULL)
-			break;
-
 		if (sc->re_ldata.re_txq_free == 0 ||
 		    sc->re_ldata.re_tx_free == 0) {
 			/* no more free slots left */
 			ifp->if_flags |= IFF_OACTIVE;
+			sc->sc_if_flags = ifp->if_flags;
 			break;
 		}
 
+		IFQ_POLL(&ifp->if_snd, m);
+		if (m == NULL)
+			break;
+
 		/*
 		 * Set up checksum offload. Note: checksum offload bits must
 		 * appear in all descriptors of a multi-descriptor transmit
@@ -1703,6 +1764,7 @@ re_start(struct ifnet *ifp)
 			 * Not enough free descriptors to transmit this packet.
 			 */
 			ifp->if_flags |= IFF_OACTIVE;
+			sc->sc_if_flags = ifp->if_flags;
 			bus_dmamap_unload(sc->sc_dmat, map);
 			break;
 		}
@@ -1715,7 +1777,6 @@ re_start(struct ifnet *ifp)
 		 */
 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
 		    BUS_DMASYNC_PREWRITE);
-
 		/*
 		 * Set up hardware VLAN tagging. Note: vlan tag info must
 		 * appear in all descriptors of a multi-descriptor
@@ -1852,6 +1913,19 @@ static int
 re_init(struct ifnet *ifp)
 {
 	struct rtk_softc *sc = ifp->if_softc;
+	int error;
+
+	RE_LOCK(sc);
+	error = re_init_locked(sc);
+	RE_UNLOCK(sc);
+
+	return error;
+}
+
+static int
+re_init_locked(struct rtk_softc *sc)
+{
+	struct ifnet *ifp = &sc->ethercom.ec_if;
 	uint32_t rxcfg = 0;
 	uint16_t cfg;
 	int error;
@@ -1860,13 +1934,15 @@ re_init(struct ifnet *ifp)
 	uint32_t reg;
 #endif
 
+	sc->sc_if_flags = ifp->if_flags;
+
 	if ((error = re_enable(sc)) != 0)
 		goto out;
 
 	/*
	 * Cancel pending I/O and free all RX/TX buffers.
 	 */
-	re_stop(ifp, 0);
+	re_stop_locked(sc, 0);
 
 	re_reset(sc);
 
@@ -2086,12 +2162,14 @@ re_init(struct ifnet *ifp)
 
 	ifp->if_flags |= IFF_RUNNING;
 	ifp->if_flags &= ~IFF_OACTIVE;
+	sc->sc_if_flags = ifp->if_flags;
 
 	callout_schedule(&sc->rtk_tick_ch, hz);
 
  out:
 	if (error) {
 		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
+		sc->sc_if_flags = ifp->if_flags;
 		ifp->if_timer = 0;
 		printf("%s: interface not running\n",
 		    device_xname(sc->sc_dev));
@@ -2105,9 +2183,7 @@ re_ioctl(struct ifnet *ifp, u_long comma
 {
 	struct rtk_softc *sc = ifp->if_softc;
 	struct ifreq *ifr = data;
-	int s, error = 0;
-
-	s = splnet();
+	int error = 0;
 
 	switch (command) {
 	case SIOCSIFMTU:
@@ -2127,42 +2203,72 @@ re_ioctl(struct ifnet *ifp, u_long comma
 		error = 0;
 		break;
 	default:
-		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
+	{
+		const int s = splnet();
+		error = ether_ioctl(ifp, command, data);
+		splx(s);
+	}
+
+		if (error != ENETRESET)
 			break;
 
 		error = 0;
 
 		if (command == SIOCSIFCAP)
-			error = if_init(ifp);
-		else if (command != SIOCADDMULTI && command != SIOCDELMULTI)
-			;
-		else if (ifp->if_flags & IFF_RUNNING)
-			rtk_setmulti(sc);
+			error = (*ifp->if_init)(ifp);
+		else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
+			RE_LOCK(sc);
+			if (sc->sc_if_flags & IFF_RUNNING)
+				rtk_setmulti(sc);
+			RE_UNLOCK(sc);
+		}
 		break;
 	}
 
-	splx(s);
-
 	return error;
 }
 
+static int
+re_ifflags_cb(struct ethercom *ec)
+{
+	struct ifnet * const ifp = &ec->ec_if;
+	struct rtk_softc * const sc = ifp->if_softc;
+	int ret = 0;
+	u_short change;
+
+	KASSERT(IFNET_LOCKED(ifp));
+	RE_LOCK(sc);
+
+	change = ifp->if_flags ^ sc->sc_if_flags;
+	sc->sc_if_flags = ifp->if_flags;
+
+	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
+		ret = ENETRESET;
+	} else if ((change & IFF_PROMISC) != 0) {
+		if ((sc->sc_if_flags & IFF_RUNNING) != 0)
+			rtk_setmulti(sc);
+	}
+
+	RE_UNLOCK(sc);
+	return ret;
+}
+
 static void
 re_watchdog(struct ifnet *ifp)
 {
-	struct rtk_softc *sc;
-	int s;
+	struct rtk_softc *sc = ifp->if_softc;
+
+	RE_LOCK(sc);
 
-	sc = ifp->if_softc;
-	s = splnet();
 	printf("%s: watchdog timeout\n", device_xname(sc->sc_dev));
 	if_statinc(ifp, if_oerrors);
 
 	re_txeof(sc);
 	re_rxeof(sc);
-	re_init(ifp);
+	re_init_locked(sc);
 
-	splx(s);
+	RE_UNLOCK(sc);
 }
 
 /*
@@ -2172,9 +2276,21 @@ re_watchdog(struct ifnet *ifp)
 static void
 re_stop(struct ifnet *ifp, int disable)
 {
-	int i;
 	struct rtk_softc *sc = ifp->if_softc;
 
+	RE_LOCK(sc);
+
+	re_stop_locked(sc, disable);
+
+	RE_UNLOCK(sc);
+}
+
+static void
+re_stop_locked(struct rtk_softc *sc, int disable)
+{
+	struct ifnet *ifp = &sc->ethercom.ec_if;
+	int i;
+
 	callout_stop(&sc->rtk_tick_ch);
 
 	mii_down(&sc->mii);
@@ -2209,17 +2325,18 @@ re_stop(struct ifnet *ifp, int disable)
 	}
 
 	/* Free the TX list buffers. */
-	for (i = 0; i < RE_TX_QLEN; i++) {
+	for (i = 0; i < RE_TX_QLEN(sc); i++) {
 		if (sc->re_ldata.re_txq[i].txq_mbuf != NULL) {
 			bus_dmamap_unload(sc->sc_dmat,
 			    sc->re_ldata.re_txq[i].txq_dmamap);
 			m_freem(sc->re_ldata.re_txq[i].txq_mbuf);
 			sc->re_ldata.re_txq[i].txq_mbuf = NULL;
+			sc->re_ldata.re_txq[i].txq_nsegs = 0;
 		}
 	}
 
 	/* Free the RX list buffers. */
-	for (i = 0; i < RE_RX_DESC_CNT; i++) {
+	for (i = 0; i < RE_RX_DESC_CNT(sc); i++) {
 		if (sc->re_ldata.re_rxsoft[i].rxs_mbuf != NULL) {
 			bus_dmamap_unload(sc->sc_dmat,
 			    sc->re_ldata.re_rxsoft[i].rxs_dmamap);
@@ -2233,4 +2350,5 @@ re_stop(struct ifnet *ifp, int disable)
 
 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
 	ifp->if_timer = 0;
+	sc->sc_if_flags = ifp->if_flags;
 }
Index: sys/dev/ic/rtl81x9reg.h
===================================================================
RCS file: /cvsroot/src/sys/dev/ic/rtl81x9reg.h,v
retrieving revision 1.55
diff -p -u -r1.55 rtl81x9reg.h
--- sys/dev/ic/rtl81x9reg.h	12 Aug 2024 18:55:01 -0000	1.55
+++ sys/dev/ic/rtl81x9reg.h	27 Nov 2025 11:29:37 -0000
@@ -616,7 +616,7 @@ struct re_stats {
 	uint16_t		re_rx_underruns;
 };
 
-#define RE_IFQ_MAXLEN		512
+#define RE_IFQ_MAXLEN		1024
 
 #define RE_JUMBO_FRAMELEN	ETHER_MAX_LEN_JUMBO
 #define RE_JUMBO_MTU		ETHERMTU_JUMBO
Index: sys/dev/ic/rtl81x9var.h
===================================================================
RCS file: /cvsroot/src/sys/dev/ic/rtl81x9var.h,v
retrieving revision 1.59
diff -p -u -r1.59 rtl81x9var.h
--- sys/dev/ic/rtl81x9var.h	12 Aug 2024 18:55:01 -0000	1.59
+++ sys/dev/ic/rtl81x9var.h	27 Nov 2025 11:29:37 -0000
@@ -113,9 +113,8 @@ struct rtk_mii_frame {
  * The 8139C+ and 8169 gigE chips support descriptor-based TX
  * and RX. In fact, they even support TCP large send. Descriptors
  * must be allocated in contiguous blocks that are aligned on a
- * 256-byte boundary. The RX rings can hold a maximum of 64 descriptors.
- * The TX rings can hold upto 64 descriptors on 8139C+, and
- * 1024 descriptors on 8169 gigE chips.
+ * 256-byte boundary. The rings can hold a maximum of 64 descriptors
+ * on 8139C+, and 1024 descriptors on 8169 gigE chips.
  */
 
 #define RE_RING_ALIGN		256
@@ -123,10 +122,10 @@ struct rtk_mii_frame {
 /*
  * Size of descriptors and TX queue.
  * These numbers must be power of two to simplify RE_NEXT_*() macro.
  */
-#define RE_RX_DESC_CNT		64
+#define RE_RX_DESC_CNT_8139	64
 #define RE_TX_DESC_CNT_8139	64
+#define RE_RX_DESC_CNT_8169	256
 #define RE_TX_DESC_CNT_8169	1024
-#define RE_TX_QLEN		64
 
 #define RE_NTXDESC_RSVD		4
 
@@ -143,7 +142,7 @@ struct re_txq {
 };
 
 struct re_list_data {
-	struct re_txq re_txq[RE_TX_QLEN];
+	struct re_txq re_txq[RE_TX_DESC_CNT_8169];	/* RE_TX_QLEN */
 	int re_txq_considx;
 	int re_txq_prodidx;
 	int re_txq_free;
@@ -156,7 +155,8 @@ struct re_list_data {
 	bus_dma_segment_t re_tx_listseg;
 	int re_tx_listnseg;
 
-	struct re_rxsoft re_rxsoft[RE_RX_DESC_CNT];
+	struct re_rxsoft re_rxsoft[RE_RX_DESC_CNT_8169];
+	int re_rx_desc_cnt;		/* # of descriptors */
 	bus_dmamap_t re_rx_list_map;
 	struct re_desc *re_rx_list;
 	int re_rx_prodidx;
@@ -229,16 +229,21 @@ struct rtk_softc {
 	void (*sc_disable)	(struct rtk_softc *);
 
 	krndsource_t rnd_source;
+
+	kmutex_t sc_lock;
+	u_short sc_if_flags;
 };
 
 #define RE_TX_DESC_CNT(sc)	((sc)->re_ldata.re_tx_desc_cnt)
 #define RE_TX_LIST_SZ(sc)	(RE_TX_DESC_CNT(sc) * sizeof(struct re_desc))
 #define RE_NEXT_TX_DESC(sc, x)	(((x) + 1) & (RE_TX_DESC_CNT(sc) - 1))
 
-#define RE_RX_LIST_SZ		(RE_RX_DESC_CNT * sizeof(struct re_desc))
-#define RE_NEXT_RX_DESC(sc, x)	(((x) + 1) & (RE_RX_DESC_CNT - 1))
+#define RE_RX_DESC_CNT(sc)	((sc)->re_ldata.re_rx_desc_cnt)
+#define RE_RX_LIST_SZ		(RE_RX_DESC_CNT(sc) * sizeof(struct re_desc))
+#define RE_NEXT_RX_DESC(sc, x)	(((x) + 1) & (RE_RX_DESC_CNT(sc) - 1))
 
-#define RE_NEXT_TXQ(sc, x)	(((x) + 1) & (RE_TX_QLEN - 1))
+#define RE_TX_QLEN(sc)		256
+#define RE_NEXT_TXQ(sc, x)	(((x) + 1) & (RE_TX_QLEN(sc) - 1))
 
 #define RE_TXDESCSYNC(sc, idx, ops)					\
 	bus_dmamap_sync((sc)->sc_dmat,					\
Index: sys/dev/pci/if_re_pci.c
===================================================================
RCS file: /cvsroot/src/sys/dev/pci/if_re_pci.c,v
retrieving revision 1.53
diff -p -u -r1.53 if_re_pci.c
--- sys/dev/pci/if_re_pci.c	28 Feb 2024 18:55:11 -0000	1.53
+++ sys/dev/pci/if_re_pci.c	27 Nov 2025 11:29:37 -0000
@@ -99,6 +99,9 @@ static const struct rtk_type re_devs[] =
 	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8168,
 	  RTK_8168,
 	  "RealTek 8168/8111 PCIe Gigabit Ethernet" },
+	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8168_2,
+	  RTK_8168,
+	  "RealTek 8161 PCIe Gigabit Ethernet" },
 	{ PCI_VENDOR_NCUBE, PCI_PRODUCT_NCUBE_TG3648,
 	  RTK_8168,
 	  "TP-Link TG-3468 v2 (RTL8168) Gigabit Ethernet" },
@@ -303,6 +306,7 @@ re_pci_attach(device_t parent, device_t
 		aprint_error("\n");
 		return;
 	}
+	pci_intr_setattr(pc, psc->sc_ih, PCI_INTR_MPSAFE, true);
 	aprint_normal_dev(self, "interrupting at %s\n", intrstr);
 
 	re_attach(sc);
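
Note on the locking conversion: the pattern used throughout the rtl8169.c changes is the usual NetBSD MP-safe driver idiom -- a per-softc adaptive mutex created at IPL_NET, LOCK/UNLOCK/ASSERT macros, and a foo()/foo_locked() split so that entry points (ioctl, start, init, stop, watchdog, tick, intr) take the lock once while internal callers only assert ownership, with the callout and the PCI interrupt flagged MPSAFE so they run without the global KERNEL_LOCK. Below is a minimal, self-contained sketch of that idiom using hypothetical mydrv_* names (not part of the patch); it is an illustration under those assumptions, not re(4) code.

/*
 * Sketch of the MP-safe locking idiom used by the patch above,
 * with hypothetical mydrv_* names.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/intr.h>
#include <sys/mutex.h>
#include <sys/callout.h>

struct mydrv_softc {
	kmutex_t	sc_lock;	/* serializes ioctl/start/intr/tick */
	struct callout	sc_tick_ch;	/* periodic tick */
};

#define MYDRV_LOCK(sc)		mutex_enter(&(sc)->sc_lock)
#define MYDRV_UNLOCK(sc)	mutex_exit(&(sc)->sc_lock)
#define MYDRV_ASSERT_LOCKED(sc)	KASSERT(mutex_owned(&(sc)->sc_lock))

static void mydrv_tick(void *);
static void mydrv_tick_locked(struct mydrv_softc *);

static void
mydrv_attach_locking(struct mydrv_softc *sc)
{

	/* Adaptive mutex at IPL_NET, shared by all driver entry points. */
	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NET);

	/* CALLOUT_MPSAFE: the callout runs without taking KERNEL_LOCK. */
	callout_init(&sc->sc_tick_ch, CALLOUT_MPSAFE);
	callout_setfunc(&sc->sc_tick_ch, mydrv_tick, sc);
	callout_schedule(&sc->sc_tick_ch, hz);
}

static void
mydrv_tick(void *arg)
{
	struct mydrv_softc *sc = arg;

	/* Entry point: take the lock, then call the _locked worker. */
	MYDRV_LOCK(sc);
	mydrv_tick_locked(sc);
	MYDRV_UNLOCK(sc);
}

static void
mydrv_tick_locked(struct mydrv_softc *sc)
{

	/* Internal path: callers already hold sc_lock. */
	MYDRV_ASSERT_LOCKED(sc);

	/* ... periodic work (e.g. mii_tick()) would go here ... */

	callout_schedule(&sc->sc_tick_ch, hz);
}

The same reasoning is behind IFEF_MPSAFE on the ifnet and PCI_INTR_MPSAFE on the interrupt handle in the patch: they tell the network stack and the interrupt dispatch code that the driver's own sc_lock provides the serialization, so neither path needs KERNEL_LOCK.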