tree:   https://gitee.com/openeuler/kernel.git OLK-5.10
head:   32bb7b0a7c87769858fbb9d60574b830383c28f7
commit: c92533ad657c0a5d1a7a6355ef2c357cbb91d5c0 [29991/30000] drivers: initial support for KPU FLEXFLOW-2100P driver from Yusur Technology
config: x86_64-allyesconfig (https://download.01.org/0day-ci/archive/20240315/202403152311.T6JFVh2t-lkp@i...)
compiler: clang version 17.0.6 (https://github.com/llvm/llvm-project 6009708b4367171ccdbf4b5905cb6a803753fe18)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20240315/202403152311.T6JFVh2t-lkp@i...)
If you fix the issue in a separate patch/commit (i.e. not just a new version of the same patch/commit), kindly add the following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202403152311.T6JFVh2t-lkp@intel.com/
All warnings (new ones prefixed by >>):
>> drivers/net/ethernet/yusur/k2/ys_k2_tx.c:137:18: warning: variable 'clean_tail_ptr' set but not used [-Wunused-but-set-variable]
     137 |         u32 desc_index, clean_tail_ptr;
         |                         ^
   1 warning generated.
--
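For context, this diagnostic means the two READ_ONCE() loads into clean_tail_ptr in ysk2_start_xmit() (quoted further down) produce a value that nothing ever consumes; the ring-full decisions go through ysk2_is_ring_full() instead. A minimal stand-alone reproduction of the same warning, using hypothetical names rather than the driver's code:

/* demo.c - compile with: clang -Wall -Wextra -c demo.c
 * Reproduces -Wunused-but-set-variable: 'tail' is written but its value
 * is never read afterwards, so the assignment is dead. */
static unsigned int ring_clean_tail;	/* stand-in for ring.clean_tail_ptr */

int demo_xmit(void)
{
	unsigned int tail;		/* set but not used -> warning */

	tail = ring_clean_tail;		/* dead load: either delete it ...   */
	return 0;			/* ... or make a result depend on it */
}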
>> drivers/net/ethernet/yusur/k2/../platform/ys_intr.c:113:40: warning: overlapping comparisons always evaluate to false [-Wtautological-overlap-compare]
     113 |         if (sub->irq_type < YS_IRQ_TYPE_QUEUE &&
         |             ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^~
     114 |             sub->irq_type > YS_IRQ_TYPE_HW_PRIVATE) {
         |             ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   1 warning generated.
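This second warning points at a likely logic bug rather than a style issue: assuming the YS_IRQ_TYPE_* enumerators run from YS_IRQ_TYPE_QUEUE (lowest) to YS_IRQ_TYPE_HW_PRIVATE (highest), which the report does not show, no irq_type can be below the lowest value and above the highest value at the same time, so with && the branch is unreachable. A range check that rejects out-of-range values needs ||. A small stand-alone sketch of both forms, with hypothetical stand-in enumerators:

/* demo_irq.c - compile with: clang -Wall -c demo_irq.c */
enum demo_irq_type {			/* hypothetical stand-ins for YS_IRQ_TYPE_* */
	DEMO_IRQ_TYPE_QUEUE = 0,	/* assumed lowest valid value  */
	DEMO_IRQ_TYPE_MISC,
	DEMO_IRQ_TYPE_HW_PRIVATE,	/* assumed highest valid value */
};

int demo_validate(int irq_type)
{
	/* As written in the driver: the two tests can never both hold, so
	 * clang flags the branch with -Wtautological-overlap-compare. */
	if (irq_type < DEMO_IRQ_TYPE_QUEUE && irq_type > DEMO_IRQ_TYPE_HW_PRIVATE)
		return -1;

	/* Presumably intended: reject anything outside [QUEUE, HW_PRIVATE]. */
	if (irq_type < DEMO_IRQ_TYPE_QUEUE || irq_type > DEMO_IRQ_TYPE_HW_PRIVATE)
		return -1;

	return 0;
}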
vim +/clean_tail_ptr +137 drivers/net/ethernet/yusur/k2/ys_k2_tx.c
   130	
   131	netdev_tx_t ysk2_start_xmit(struct sk_buff *skb, struct net_device *ndev)
   132	{
   133		struct ys_ndev_priv *ndev_priv = netdev_priv(ndev);
   134		struct skb_shared_info *shinfo = skb_shinfo(skb);
   135		struct ysk2_port *k2port = ndev_priv->adp_priv;
   136		struct ysk2_desc_ring *tx_ring;
 > 137		u32 desc_index, clean_tail_ptr;
   138		struct ysk2_tx_info *tx_info;
   139		struct ysk2_desc *tx_desc;
   140		bool stop_queue;
   141		u16 txq_index;
   142	
   143		if (unlikely(!(ndev->flags & IFF_UP)))
   144			goto tx_drop;
   145	
   146		txq_index = skb_get_queue_mapping(skb);
   147		tx_ring = k2port->qps[txq_index].tx_ring;
   148	
   149		clean_tail_ptr = READ_ONCE(tx_ring->ring.clean_tail_ptr);
   150	
   151		desc_index = tx_ring->ring.head_ptr & tx_ring->ring.size_mask;
   152	
   153		tx_desc = (struct ysk2_desc *)(tx_ring->ring.buf +
   154					       desc_index * tx_ring->ring.stride);
   155	
   156		tx_info = &tx_ring->tx_info[desc_index];
   157	
   158		/* too many frags or very short data portion; linearize */
   159		if (shinfo->nr_frags > tx_ring->tx_max_sg_frags ||
   160		    (skb->data_len && skb->data_len < 32)) {
   161			if (skb_linearize(skb))
   162				goto tx_drop_count;
   163		}
   164	
   165		/* Refresh the cache through streaming mapping */
   166		if (ysk2_map_skb(&tx_ring->ring, tx_info, tx_desc, skb))
   167			goto tx_drop_count;
   168	
   169		/* count packet */
   170		tx_ring->packets++;
   171		tx_ring->bytes += skb->len;
   172	
   173		/* enqueue */
   174		tx_ring->ring.head_ptr++;
   175	
   176		skb_tx_timestamp(skb);
   177	
   178		/* if the tx_ring is full now, stop the tx queue */
   179		stop_queue = ysk2_is_ring_full(&tx_ring->ring);
   180		if (unlikely(stop_queue)) {
   181			ys_net_debug("TX ring %d full\n", txq_index);
   182			netif_tx_stop_queue(tx_ring->tx_queue);
   183		}
   184	
   185		/* enqueue on NIC */
   186		if (unlikely(!netdev_xmit_more() || stop_queue))
   187			ysk2_write_head_ptr(&tx_ring->ring);
   188	
   189		/* check if queue restarted */
   190		if (unlikely(stop_queue)) {
   191			clean_tail_ptr = READ_ONCE(tx_ring->ring.clean_tail_ptr);
   192	
   193			if (unlikely(!ysk2_is_ring_full(&tx_ring->ring)))
   194				netif_tx_wake_queue(tx_ring->tx_queue);
   195		}
   196	
   197		return NETDEV_TX_OK;
   198	
   199	tx_drop_count:
   200		tx_ring->dropped_packets++;
   201	tx_drop:
   202		dev_kfree_skb_any(skb);
   203		return NETDEV_TX_OK;
   204	}
   205	
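The quoted function also shows where clean_tail_ptr was presumably meant to matter: head_ptr is masked with size_mask (line 151), which suggests free-running producer/consumer counters over a power-of-two ring. Under that assumption (the report does not show ysk2_is_ring_full()), ring occupancy is just the unsigned difference of the two counters, so the warning can be resolved either by dropping the two dead READ_ONCE() loads (lines 149 and 191) or by feeding the re-read tail into the restart check. A hypothetical helper, not the driver's ysk2_is_ring_full(), illustrating the arithmetic:

#include <linux/types.h>

/* Hypothetical helper: with free-running u32 counters, unsigned
 * subtraction yields the number of in-flight descriptors even after
 * the counters wrap around. */
static inline bool demo_ring_full(u32 head_ptr, u32 clean_tail_ptr, u32 size)
{
	return (u32)(head_ptr - clean_tail_ptr) >= size;
}

If ysk2_is_ring_full() already performs an equivalent read of clean_tail_ptr internally, the simpler fix is to delete the local variable together with both READ_ONCE() lines.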