/*
 * xen paravirt network card backend
 *
 * (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; under version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/wait.h>

#include "net/net.h"
#include "net/checksum.h"
#include "net/util.h"
#include "hw/xen/xen-legacy-backend.h"

#include "hw/xen/interface/io/netif.h"

/* ------------------------------------------------------------- */

struct XenNetDev {
    struct XenLegacyDevice xendev;  /* must be first */
    char                  *mac;
    int                   tx_work;
    int                   tx_ring_ref;
    int                   rx_ring_ref;
    struct netif_tx_sring *txs;
    struct netif_rx_sring *rxs;
    netif_tx_back_ring_t  tx_ring;
    netif_rx_back_ring_t  rx_ring;
    NICConf               conf;
    NICState              *nic;
};

/* ------------------------------------------------------------- */

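/*
 * Push a response for a completed tx request onto the tx ring and kick
 * the frontend if it asked to be notified.  If the frontend queued more
 * requests in the meantime, flag additional work for net_tx_packets().
 */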
static void net_tx_response(struct XenNetDev *netdev, netif_tx_request_t *txp, int8_t st)
{
    RING_IDX i = netdev->tx_ring.rsp_prod_pvt;
    netif_tx_response_t *resp;
    int notify;

    resp = RING_GET_RESPONSE(&netdev->tx_ring, i);
    resp->id     = txp->id;
    resp->status = st;

#if 0
    if (txp->flags & NETTXF_extra_info) {
        RING_GET_RESPONSE(&netdev->tx_ring, ++i)->status = NETIF_RSP_NULL;
    }
#endif

    netdev->tx_ring.rsp_prod_pvt = ++i;
    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netdev->tx_ring, notify);
    if (notify) {
        xen_pv_send_notify(&netdev->xendev);
    }

    if (i == netdev->tx_ring.req_cons) {
        int more_to_do;
        RING_FINAL_CHECK_FOR_REQUESTS(&netdev->tx_ring, more_to_do);
        if (more_to_do) {
            netdev->tx_work++;
        }
    }
}

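/*
 * Complete a tx request with an error status.  The batched variant used
 * by netback is kept under "#if 0" for reference only.
 */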
static void net_tx_error(struct XenNetDev *netdev, netif_tx_request_t *txp, RING_IDX end)
{
#if 0
    /*
     * Hmm, why does netback fail everything in the ring?
     * Should we do that even when not supporting SG and TSO?
     */
    RING_IDX cons = netdev->tx_ring.req_cons;

    do {
        make_tx_response(netif, txp, NETIF_RSP_ERROR);
        if (cons >= end) {
            break;
        }
        txp = RING_GET_REQUEST(&netdev->tx_ring, cons++);
    } while (1);
    netdev->tx_ring.req_cons = cons;
    netif_schedule_work(netif);
    netif_put(netif);
#else
    net_tx_response(netdev, txp, NETIF_RSP_ERROR);
#endif
}

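/*
 * Drain the tx ring: map each granted page read-only, hand the frame to
 * the qemu net layer (bouncing through a temporary buffer when the
 * checksum still has to be filled in), then unmap the page and send a
 * response.  Keeps looping as long as net_tx_response() flagged more work.
 */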
static void net_tx_packets(struct XenNetDev *netdev)
{
    netif_tx_request_t txreq;
    RING_IDX rc, rp;
    void *page;
    void *tmpbuf = NULL;

    for (;;) {
        rc = netdev->tx_ring.req_cons;
        rp = netdev->tx_ring.sring->req_prod;
        xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

        while (rc != rp) {
            if (RING_REQUEST_CONS_OVERFLOW(&netdev->tx_ring, rc)) {
                break;
            }
            memcpy(&txreq, RING_GET_REQUEST(&netdev->tx_ring, rc), sizeof(txreq));
            netdev->tx_ring.req_cons = ++rc;

#if 1
            /*
             * This should not happen in theory; we don't announce the
             * feature-{sg,gso,whatelse} flags in xenstore (yet?).
             */
            if (txreq.flags & NETTXF_extra_info) {
                xen_pv_printf(&netdev->xendev, 0, "FIXME: extra info flag\n");
                net_tx_error(netdev, &txreq, rc);
                continue;
            }
            if (txreq.flags & NETTXF_more_data) {
                xen_pv_printf(&netdev->xendev, 0, "FIXME: more data flag\n");
                net_tx_error(netdev, &txreq, rc);
                continue;
            }
#endif

            if (txreq.size < 14) {
                /* too small to even hold an ethernet header */
                xen_pv_printf(&netdev->xendev, 0, "bad packet size: %d\n",
                              txreq.size);
                net_tx_error(netdev, &txreq, rc);
                continue;
            }

            if ((txreq.offset + txreq.size) > XC_PAGE_SIZE) {
                xen_pv_printf(&netdev->xendev, 0, "error: page crossing\n");
                net_tx_error(netdev, &txreq, rc);
                continue;
            }

            xen_pv_printf(&netdev->xendev, 3,
                          "tx packet ref %d, off %d, len %d, flags 0x%x%s%s%s%s\n",
                          txreq.gref, txreq.offset, txreq.size, txreq.flags,
                          (txreq.flags & NETTXF_csum_blank)     ? " csum_blank"     : "",
                          (txreq.flags & NETTXF_data_validated) ? " data_validated" : "",
                          (txreq.flags & NETTXF_more_data)      ? " more_data"      : "",
                          (txreq.flags & NETTXF_extra_info)     ? " extra_info"     : "");

            page = xen_be_map_grant_ref(&netdev->xendev, txreq.gref,
                                        PROT_READ);
            if (page == NULL) {
                xen_pv_printf(&netdev->xendev, 0,
                              "error: tx gref dereference failed (%d)\n",
                              txreq.gref);
                net_tx_error(netdev, &txreq, rc);
                continue;
            }
            if (txreq.flags & NETTXF_csum_blank) {
                /* have read-only mapping -> can't fill checksum in-place */
                if (!tmpbuf) {
                    tmpbuf = g_malloc(XC_PAGE_SIZE);
                }
                memcpy(tmpbuf, page + txreq.offset, txreq.size);
                net_checksum_calculate(tmpbuf, txreq.size, CSUM_ALL);
                qemu_send_packet(qemu_get_queue(netdev->nic), tmpbuf,
                                 txreq.size);
            } else {
                qemu_send_packet(qemu_get_queue(netdev->nic),
                                 page + txreq.offset, txreq.size);
            }
            xen_be_unmap_grant_ref(&netdev->xendev, page);
            net_tx_response(netdev, &txreq, NETIF_RSP_OKAY);
        }
        if (!netdev->tx_work) {
            break;
        }
        netdev->tx_work = 0;
    }
    g_free(tmpbuf);
}

/* ------------------------------------------------------------- */

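/*
 * Fill in and push an rx response (or an error status) for the given rx
 * request, notifying the frontend when needed.
 */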
static void net_rx_response(struct XenNetDev *netdev,
                            netif_rx_request_t *req, int8_t st,
                            uint16_t offset, uint16_t size,
                            uint16_t flags)
{
    RING_IDX i = netdev->rx_ring.rsp_prod_pvt;
    netif_rx_response_t *resp;
    int notify;

    resp = RING_GET_RESPONSE(&netdev->rx_ring, i);
    resp->offset = offset;
    resp->flags  = flags;
    resp->id     = req->id;
    resp->status = (int16_t)size;
    if (st < 0) {
        resp->status = (int16_t)st;
    }

    xen_pv_printf(&netdev->xendev, 3,
                  "rx response: idx %d, status %d, flags 0x%x\n",
                  i, resp->status, resp->flags);

    netdev->rx_ring.rsp_prod_pvt = ++i;
    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netdev->rx_ring, notify);
    if (notify) {
        xen_pv_send_notify(&netdev->xendev);
    }
}

#define NET_IP_ALIGN 2

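/*
 * NetClientInfo receive handler: copy an incoming frame into the page
 * granted by the frontend's next rx request.  Returns the frame size on
 * success, 0 if no rx request is available yet (the packet stays queued),
 * and -1 on error.
 */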
static ssize_t net_rx_packet(NetClientState *nc, const uint8_t *buf, size_t size)
{
    struct XenNetDev *netdev = qemu_get_nic_opaque(nc);
    netif_rx_request_t rxreq;
    RING_IDX rc, rp;
    void *page;

    if (netdev->xendev.be_state != XenbusStateConnected) {
        return -1;
    }

    rc = netdev->rx_ring.req_cons;
    rp = netdev->rx_ring.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    if (rc == rp || RING_REQUEST_CONS_OVERFLOW(&netdev->rx_ring, rc)) {
        return 0;
    }
    if (size > XC_PAGE_SIZE - NET_IP_ALIGN) {
        xen_pv_printf(&netdev->xendev, 0, "packet too big (%lu > %ld)",
                      (unsigned long)size, XC_PAGE_SIZE - NET_IP_ALIGN);
        return -1;
    }

    memcpy(&rxreq, RING_GET_REQUEST(&netdev->rx_ring, rc), sizeof(rxreq));
    netdev->rx_ring.req_cons = ++rc;

    page = xen_be_map_grant_ref(&netdev->xendev, rxreq.gref, PROT_WRITE);
    if (page == NULL) {
        xen_pv_printf(&netdev->xendev, 0,
                      "error: rx gref dereference failed (%d)\n",
                      rxreq.gref);
        net_rx_response(netdev, &rxreq, NETIF_RSP_ERROR, 0, 0, 0);
        return -1;
    }
    memcpy(page + NET_IP_ALIGN, buf, size);
    xen_be_unmap_grant_ref(&netdev->xendev, page);
    net_rx_response(netdev, &rxreq, NETIF_RSP_OKAY, NET_IP_ALIGN, size, 0);

    return size;
}

/* ------------------------------------------------------------- */

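/* qemu network client callbacks for the backend's NIC */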
static NetClientInfo net_xen_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .receive = net_rx_packet,
};

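/*
 * Backend init: read the mac address from xenstore, create the qemu NIC
 * and advertise the features we support (rx-copy only, no rx-flip).
 */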
static int net_init(struct XenLegacyDevice *xendev)
{
    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);

    /* read xenstore entries */
    if (netdev->mac == NULL) {
        netdev->mac = xenstore_read_be_str(&netdev->xendev, "mac");
    }

    /* do we have all we need? */
    if (netdev->mac == NULL) {
        return -1;
    }

    if (net_parse_macaddr(netdev->conf.macaddr.a, netdev->mac) < 0) {
        return -1;
    }

    netdev->nic = qemu_new_nic(&net_xen_info, &netdev->conf,
                               "xen", NULL, netdev);

    qemu_set_info_str(qemu_get_queue(netdev->nic),
                      "nic: xenbus vif macaddr=%s", netdev->mac);

    /* fill info */
    xenstore_write_be_int(&netdev->xendev, "feature-rx-copy", 1);
    xenstore_write_be_int(&netdev->xendev, "feature-rx-flip", 0);

    return 0;
}

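/*
 * Frontend is ready: read the ring references and event channel from the
 * frontend, insist on rx-copy, map and initialise both shared rings and
 * bind the event channel.
 */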
static int net_connect(struct XenLegacyDevice *xendev)
{
    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);
    int rx_copy;

    if (xenstore_read_fe_int(&netdev->xendev, "tx-ring-ref",
                             &netdev->tx_ring_ref) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&netdev->xendev, "rx-ring-ref",
                             &netdev->rx_ring_ref) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&netdev->xendev, "event-channel",
                             &netdev->xendev.remote_port) == -1) {
        return -1;
    }

    if (xenstore_read_fe_int(&netdev->xendev, "request-rx-copy", &rx_copy) == -1) {
        rx_copy = 0;
    }
    if (rx_copy == 0) {
        xen_pv_printf(&netdev->xendev, 0,
                      "frontend doesn't support rx-copy.\n");
        return -1;
    }

    netdev->txs = xen_be_map_grant_ref(&netdev->xendev,
                                       netdev->tx_ring_ref,
                                       PROT_READ | PROT_WRITE);
    if (!netdev->txs) {
        return -1;
    }
    netdev->rxs = xen_be_map_grant_ref(&netdev->xendev,
                                       netdev->rx_ring_ref,
                                       PROT_READ | PROT_WRITE);
    if (!netdev->rxs) {
        xen_be_unmap_grant_ref(&netdev->xendev, netdev->txs);
        netdev->txs = NULL;
        return -1;
    }
    BACK_RING_INIT(&netdev->tx_ring, netdev->txs, XC_PAGE_SIZE);
    BACK_RING_INIT(&netdev->rx_ring, netdev->rxs, XC_PAGE_SIZE);

    xen_be_bind_evtchn(&netdev->xendev);

    xen_pv_printf(&netdev->xendev, 1, "ok: tx-ring-ref %d, rx-ring-ref %d, "
                  "remote port %d, local port %d\n",
                  netdev->tx_ring_ref, netdev->rx_ring_ref,
                  netdev->xendev.remote_port, netdev->xendev.local_port);

    net_tx_packets(netdev);
    return 0;
}

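/* Tear down the connection: unbind the event channel, unmap the rings. */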
static void net_disconnect(struct XenLegacyDevice *xendev)
{
    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);

    xen_pv_unbind_evtchn(&netdev->xendev);

    if (netdev->txs) {
        xen_be_unmap_grant_ref(&netdev->xendev, netdev->txs);
        netdev->txs = NULL;
    }
    if (netdev->rxs) {
        xen_be_unmap_grant_ref(&netdev->xendev, netdev->rxs);
        netdev->rxs = NULL;
    }
}

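/* Event channel kick: process pending tx requests and retry queued rx packets. */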
static void net_event(struct XenLegacyDevice *xendev)
{
    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);
    net_tx_packets(netdev);
    qemu_flush_queued_packets(qemu_get_queue(netdev->nic));
}

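/* Final cleanup when the backend device is removed. */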
static int net_free(struct XenLegacyDevice *xendev)
{
    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);

    if (netdev->nic) {
        qemu_del_nic(netdev->nic);
        netdev->nic = NULL;
    }
    g_free(netdev->mac);
    netdev->mac = NULL;
    return 0;
}

/* ------------------------------------------------------------- */

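/* Operations handed to the legacy Xen backend core. */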
struct XenDevOps xen_netdev_ops = {
    .size       = sizeof(struct XenNetDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .init       = net_init,
    .initialise = net_connect,
    .event      = net_event,
    .disconnect = net_disconnect,
    .free       = net_free,
};