From df132ffc932ca0f5b99e1ca6c424d147c28bdfd8 Mon Sep 17 00:00:00 2001
From: Alex Forencich <alex@alexforencich.com>
Date: Wed, 28 Feb 2024 17:33:01 -0800
Subject: [PATCH] modules/mqnic: Add mqnic_sched_port to driver, register
 netdevs for all ports

Signed-off-by: Alex Forencich <alex@alexforencich.com>
---
 modules/mqnic/Makefile           |   1 +
 modules/mqnic/mqnic.h            |  75 ++++++++++++---
 modules/mqnic/mqnic_if.c         |  48 +++++++++-
 modules/mqnic/mqnic_netdev.c     |  60 ++++++------
 modules/mqnic/mqnic_sched_port.c | 152 +++++++++++++++++++++++++++++++
 modules/mqnic/mqnic_scheduler.c  |  94 ++++++++++++++++---
 6 files changed, 374 insertions(+), 56 deletions(-)
 create mode 100644 modules/mqnic/mqnic_sched_port.c

diff --git a/modules/mqnic/Makefile b/modules/mqnic/Makefile
index 796d737f0..8700daee3 100644
--- a/modules/mqnic/Makefile
+++ b/modules/mqnic/Makefile
@@ -15,6 +15,7 @@ mqnic-y += mqnic_if.o
 mqnic-y += mqnic_port.o
 mqnic-y += mqnic_netdev.o
 mqnic-y += mqnic_sched_block.o
+mqnic-y += mqnic_sched_port.o
 mqnic-y += mqnic_scheduler.o
 mqnic-y += mqnic_ptp.o
 mqnic-y += mqnic_i2c.o
diff --git a/modules/mqnic/mqnic.h b/modules/mqnic/mqnic.h
index 30c086aaf..1e62fddf4 100644
--- a/modules/mqnic/mqnic.h
+++ b/modules/mqnic/mqnic.h
@@ -95,6 +95,7 @@ struct mqnic_irq {
 	int irqn;
 	char name[16 + 3];
 	struct atomic_notifier_head nh;
+	struct list_head list;
 };
 
 #ifdef CONFIG_AUXILIARY_BUS
@@ -338,9 +339,18 @@ struct mqnic_sched {
 
 	int enable_count;
 
+	struct list_head sched_port_list;
+
 	u8 __iomem *hw_addr;
 };
 
+struct mqnic_sched_port {
+	struct mqnic_sched *sched;
+	int index;
+	struct list_head list;
+	struct list_head free_list;
+};
+
 struct mqnic_port {
 	struct device *dev;
 	struct mqnic_if *interface;
@@ -405,6 +415,9 @@ struct mqnic_if {
 	u32 sched_block_count;
 	struct mqnic_sched_block *sched_block[MQNIC_MAX_PORTS];
 
+	spinlock_t free_sched_port_list_lock;
+	struct list_head free_sched_port_list;
+
 	u32 max_desc_block_size;
 
 	u32 rx_queue_map_indir_table_size;
@@ -450,7 +463,7 @@ struct mqnic_priv {
 	struct rw_semaphore rxq_table_sem;
 	struct radix_tree_root rxq_table;
 
-	struct mqnic_sched_block *sched_block;
+	struct mqnic_sched_port *sched_port;
 	struct mqnic_port *port;
 
 	u32 max_desc_block_size;
@@ -503,6 +516,10 @@ u32 mqnic_interface_get_rx_queue_map_app_mask(struct mqnic_if *interface, int po
 void mqnic_interface_set_rx_queue_map_app_mask(struct mqnic_if *interface, int port, u32 val);
 u32 mqnic_interface_get_rx_queue_map_indir_table(struct mqnic_if *interface, int port, int index);
 void mqnic_interface_set_rx_queue_map_indir_table(struct mqnic_if *interface, int port, int index, u32 val);
+int mqnic_interface_register_sched_port(struct mqnic_if *interface, struct mqnic_sched_port *port);
+int mqnic_interface_unregister_sched_port(struct mqnic_if *interface, struct mqnic_sched_port *port);
+struct mqnic_sched_port *mqnic_interface_alloc_sched_port(struct mqnic_if *interface);
+void mqnic_interface_free_sched_port(struct mqnic_if *interface, struct mqnic_sched_port *port);
 
 // mqnic_port.c
 struct mqnic_port *mqnic_create_port(struct mqnic_if *interface, int index,
@@ -525,7 +542,7 @@ void mqnic_stop_port(struct net_device *ndev);
 int mqnic_update_indir_table(struct net_device *ndev);
 void mqnic_update_stats(struct net_device *ndev);
 struct net_device *mqnic_create_netdev(struct mqnic_if *interface, int index,
-		struct mqnic_port *port, struct mqnic_sched_block *sched_block);
+		struct mqnic_port *port);
 void mqnic_destroy_netdev(struct net_device *ndev);
 
 // mqnic_sched_block.c
@@ -541,22 +558,52 @@ struct mqnic_sched *mqnic_create_scheduler(struct mqnic_sched_block *block,
 void mqnic_destroy_scheduler(struct mqnic_sched *sched);
 int mqnic_scheduler_enable(struct mqnic_sched *sched);
 void mqnic_scheduler_disable(struct mqnic_sched *sched);
-int mqnic_scheduler_channel_enable(struct mqnic_sched *sched, int ch);
-void mqnic_scheduler_channel_disable(struct mqnic_sched *sched, int ch);
-void mqnic_scheduler_channel_set_dest(struct mqnic_sched *sched, int ch, int val);
-int mqnic_scheduler_channel_get_dest(struct mqnic_sched *sched, int ch);
-void mqnic_scheduler_channel_set_pkt_budget(struct mqnic_sched *sched, int ch, int val);
-int mqnic_scheduler_channel_get_pkt_budget(struct mqnic_sched *sched, int ch);
-void mqnic_scheduler_channel_set_data_budget(struct mqnic_sched *sched, int ch, int val);
-int mqnic_scheduler_channel_get_data_budget(struct mqnic_sched *sched, int ch);
-void mqnic_scheduler_channel_set_pkt_limit(struct mqnic_sched *sched, int ch, int val);
-int mqnic_scheduler_channel_get_pkt_limit(struct mqnic_sched *sched, int ch);
-void mqnic_scheduler_channel_set_data_limit(struct mqnic_sched *sched, int ch, int val);
-int mqnic_scheduler_channel_get_data_limit(struct mqnic_sched *sched, int ch);
+int mqnic_scheduler_channel_enable(struct mqnic_sched *sched, int port, int tc);
+void mqnic_scheduler_channel_disable(struct mqnic_sched *sched, int port, int tc);
+void mqnic_scheduler_channel_set_dest(struct mqnic_sched *sched, int port, int tc, int val);
+int mqnic_scheduler_channel_get_dest(struct mqnic_sched *sched, int port, int tc);
+void mqnic_scheduler_channel_set_pkt_budget(struct mqnic_sched *sched, int port, int tc, int val);
+int mqnic_scheduler_channel_get_pkt_budget(struct mqnic_sched *sched, int port, int tc);
+void mqnic_scheduler_channel_set_data_budget(struct mqnic_sched *sched, int port, int tc, int val);
+int mqnic_scheduler_channel_get_data_budget(struct mqnic_sched *sched, int port, int tc);
+void mqnic_scheduler_channel_set_pkt_limit(struct mqnic_sched *sched, int port, int tc, int val);
+int mqnic_scheduler_channel_get_pkt_limit(struct mqnic_sched *sched, int port, int tc);
+void mqnic_scheduler_channel_set_data_limit(struct mqnic_sched *sched, int port, int tc, int val);
+int mqnic_scheduler_channel_get_data_limit(struct mqnic_sched *sched, int port, int tc);
 int mqnic_scheduler_queue_enable(struct mqnic_sched *sched, int queue);
 void mqnic_scheduler_queue_disable(struct mqnic_sched *sched, int queue);
 void mqnic_scheduler_queue_set_pause(struct mqnic_sched *sched, int queue, int val);
 int mqnic_scheduler_queue_get_pause(struct mqnic_sched *sched, int queue);
+int mqnic_scheduler_queue_port_enable(struct mqnic_sched *sched, int queue, int port);
+void mqnic_scheduler_queue_port_disable(struct mqnic_sched *sched, int queue, int port);
+void mqnic_scheduler_queue_port_set_pause(struct mqnic_sched *sched, int queue, int port, int val);
+int mqnic_scheduler_queue_port_get_pause(struct mqnic_sched *sched, int queue, int port);
+void mqnic_scheduler_queue_port_set_tc(struct mqnic_sched *sched, int queue, int port, int val);
+int mqnic_scheduler_queue_port_get_tc(struct mqnic_sched *sched, int queue, int port);
+
+// mqnic_sched_port.c
+struct mqnic_sched_port *mqnic_create_sched_port(struct mqnic_sched *sched, int index);
+void mqnic_destroy_sched_port(struct mqnic_sched_port *port);
+int mqnic_sched_port_enable(struct mqnic_sched_port *port);
+void mqnic_sched_port_disable(struct mqnic_sched_port *port);
+int mqnic_sched_port_channel_enable(struct mqnic_sched_port *port, int tc);
+void mqnic_sched_port_channel_disable(struct mqnic_sched_port *port, int tc);
+void mqnic_sched_port_channel_set_dest(struct mqnic_sched_port *port, int tc, int val);
+int mqnic_sched_port_channel_get_dest(struct mqnic_sched_port *port, int tc);
+void mqnic_sched_port_channel_set_pkt_budget(struct mqnic_sched_port *port, int tc, int val);
+int mqnic_sched_port_channel_get_pkt_budget(struct mqnic_sched_port *port, int tc);
+void mqnic_sched_port_channel_set_data_budget(struct mqnic_sched_port *port, int tc, int val);
+int mqnic_sched_port_channel_get_data_budget(struct mqnic_sched_port *port, int tc);
+void mqnic_sched_port_channel_set_pkt_limit(struct mqnic_sched_port *port, int tc, int val);
+int mqnic_sched_port_channel_get_pkt_limit(struct mqnic_sched_port *port, int tc);
+void mqnic_sched_port_channel_set_data_limit(struct mqnic_sched_port *port, int tc, int val);
+int mqnic_sched_port_channel_get_data_limit(struct mqnic_sched_port *port, int tc);
+int mqnic_sched_port_queue_enable(struct mqnic_sched_port *port, int queue);
+void mqnic_sched_port_queue_disable(struct mqnic_sched_port *port, int queue);
+void mqnic_sched_port_queue_set_pause(struct mqnic_sched_port *port, int queue, int val);
+int mqnic_sched_port_queue_get_pause(struct mqnic_sched_port *port, int queue);
+void mqnic_sched_port_queue_set_tc(struct mqnic_sched_port *port, int queue, int val);
+int mqnic_sched_port_queue_get_tc(struct mqnic_sched_port *port, int queue);
 
 // mqnic_ptp.c
 void mqnic_register_phc(struct mqnic_dev *mdev);
diff --git a/modules/mqnic/mqnic_if.c b/modules/mqnic/mqnic_if.c
index 3445f13e3..a69651e05 100644
--- a/modules/mqnic/mqnic_if.c
+++ b/modules/mqnic/mqnic_if.c
@@ -29,6 +29,9 @@ struct mqnic_if *mqnic_create_interface(struct mqnic_dev *mdev, int index, u8 __
 	interface->hw_addr = hw_addr;
 	interface->csr_hw_addr = hw_addr + mdev->if_csr_offset;
 
+	INIT_LIST_HEAD(&interface->free_sched_port_list);
+	spin_lock_init(&interface->free_sched_port_list_lock);
+
 	// Enumerate registers
 	interface->rb_list = mqnic_enumerate_reg_block_list(interface->hw_addr, mdev->if_csr_offset, interface->hw_regs_size);
 	if (!interface->rb_list) {
@@ -267,10 +270,10 @@ struct mqnic_if *mqnic_create_interface(struct mqnic_dev *mdev, int index, u8 __
 	}
 
 	// create net_devices
-	interface->ndev_count = 1;
+	interface->ndev_count = interface->port_count;
 	for (k = 0; k < interface->ndev_count; k++) {
 		struct net_device *ndev = mqnic_create_netdev(interface, k,
-				interface->port[k], interface->sched_block[k]);
+				interface->port[k]);
 		if (IS_ERR_OR_NULL(ndev)) {
 			ret = PTR_ERR(ndev);
 			goto fail;
@@ -395,3 +398,44 @@ void mqnic_interface_set_rx_queue_map_indir_table(struct mqnic_if *interface, in
 	iowrite32(val, interface->rx_queue_map_indir_table[port] + index*4);
 }
 EXPORT_SYMBOL(mqnic_interface_set_rx_queue_map_indir_table);
+
+int mqnic_interface_register_sched_port(struct mqnic_if *interface, struct mqnic_sched_port *port)
+{
+	spin_lock(&interface->free_sched_port_list_lock);
+	list_add_tail(&port->free_list, &interface->free_sched_port_list);
+	spin_unlock(&interface->free_sched_port_list_lock);
+	return 0;
+}
+
+int mqnic_interface_unregister_sched_port(struct mqnic_if *interface, struct mqnic_sched_port *port)
+{
+	spin_lock(&interface->free_sched_port_list_lock);
+	list_del(&port->free_list);
+	spin_unlock(&interface->free_sched_port_list_lock);
+	return 0;
+}
+
+struct mqnic_sched_port *mqnic_interface_alloc_sched_port(struct mqnic_if *interface)
+{
+	struct mqnic_sched_port *port = NULL;
+
+	spin_lock(&interface->free_sched_port_list_lock);
+
+	port = list_first_entry_or_null(&interface->free_sched_port_list, struct mqnic_sched_port, free_list);
+
+	if (port)
+		list_del(&port->free_list);
+
+	spin_unlock(&interface->free_sched_port_list_lock);
+
+	return port;
+}
+
+void mqnic_interface_free_sched_port(struct mqnic_if *interface, struct mqnic_sched_port *port)
+{
+	if (!port)
+		return;
+	spin_lock(&interface->free_sched_port_list_lock);
+	list_add_tail(&port->free_list, &interface->free_sched_port_list);
+	spin_unlock(&interface->free_sched_port_list_lock);
+}
diff --git a/modules/mqnic/mqnic_netdev.c b/modules/mqnic/mqnic_netdev.c
index 043a0b9b8..1f437d837 100644
--- a/modules/mqnic/mqnic_netdev.c
+++ b/modules/mqnic/mqnic_netdev.c
@@ -27,6 +27,9 @@ int mqnic_start_port(struct net_device *ndev)
 
 	desc_block_size = min_t(u32, priv->interface->max_desc_block_size, 4);
 
+	// allocate scheduler port
+	priv->sched_port = mqnic_interface_alloc_sched_port(iface);
+
 	// set up RX queues
 	for (k = 0; k < priv->rxq_count; k++) {
 		// create CQ
@@ -166,20 +169,22 @@ int mqnic_start_port(struct net_device *ndev)
 	radix_tree_for_each_slot(slot, &priv->txq_table, &iter, 0) {
 		struct mqnic_ring *q = (struct mqnic_ring *)*slot;
 
-		mqnic_scheduler_queue_enable(priv->sched_block->sched[0], q->index);
+		mqnic_sched_port_queue_set_tc(priv->sched_port, q->index, 0);
+		mqnic_sched_port_queue_enable(priv->sched_port, q->index);
 	}
 	up_read(&priv->txq_table_sem);
 
 	// configure scheduler flow control
-	mqnic_scheduler_channel_set_pkt_budget(priv->sched_block->sched[0], 0, 1);
-	mqnic_scheduler_channel_set_data_budget(priv->sched_block->sched[0], 0, ndev->mtu + ETH_HLEN);
-	mqnic_scheduler_channel_set_pkt_limit(priv->sched_block->sched[0], 0, 0xFFFF);
-	mqnic_scheduler_channel_set_data_limit(priv->sched_block->sched[0], 0, iface->tx_fifo_depth);
+	mqnic_sched_port_channel_set_dest(priv->sched_port, 0, (priv->port->index << 4) | 0);
+	mqnic_sched_port_channel_set_pkt_budget(priv->sched_port, 0, 1);
+	mqnic_sched_port_channel_set_data_budget(priv->sched_port, 0, ndev->mtu + ETH_HLEN);
+	mqnic_sched_port_channel_set_pkt_limit(priv->sched_port, 0, 0xFFFF);
+	mqnic_sched_port_channel_set_data_limit(priv->sched_port, 0, iface->tx_fifo_depth);
 
-	mqnic_scheduler_channel_enable(priv->sched_block->sched[0], 0);
+	mqnic_sched_port_channel_enable(priv->sched_port, 0);
 
 	// enable scheduler
-	mqnic_activate_sched_block(priv->sched_block);
+	mqnic_sched_port_enable(priv->sched_port);
 
 	netif_tx_start_all_queues(ndev);
 	netif_device_attach(ndev);
@@ -229,20 +234,18 @@ void mqnic_stop_port(struct net_device *ndev)
 	mqnic_update_stats(ndev);
 	spin_unlock_bh(&priv->stats_lock);
 
-	// configure scheduler
-	down_read(&priv->txq_table_sem);
-	radix_tree_for_each_slot(slot, &priv->txq_table, &iter, 0) {
-		struct mqnic_ring *q = (struct mqnic_ring *)*slot;
-
-		mqnic_scheduler_queue_disable(priv->sched_block->sched[0], q->index);
-	}
-	up_read(&priv->txq_table_sem);
+	if (priv->sched_port) {
+		down_read(&priv->txq_table_sem);
+		radix_tree_for_each_slot(slot, &priv->txq_table, &iter, 0) {
+			struct mqnic_ring *q = (struct mqnic_ring *)*slot;
 
-	// configure scheduler flow control
-	mqnic_scheduler_channel_disable(priv->sched_block->sched[0], 0);
+			mqnic_sched_port_queue_disable(priv->sched_port, q->index);
+		}
+		up_read(&priv->txq_table_sem);
 
-	// disable scheduler
-	mqnic_deactivate_sched_block(priv->sched_block);
+		mqnic_sched_port_channel_disable(priv->sched_port, 0);
+		mqnic_sched_port_disable(priv->sched_port);
+	}
 
 	// disable TX and RX queues
 	down_read(&priv->txq_table_sem);
@@ -297,6 +300,11 @@ void mqnic_stop_port(struct net_device *ndev)
 		mqnic_destroy_cq(cq);
 	}
 	up_write(&priv->rxq_table_sem);
+
+	// free scheduler port
+	if (priv->sched_port)
+		mqnic_interface_free_sched_port(priv->interface, priv->sched_port);
+	priv->sched_port = NULL;
 }
 
 static int mqnic_open(struct net_device *ndev)
@@ -338,8 +346,8 @@ int mqnic_update_indir_table(struct net_device *ndev)
 	struct mqnic_ring *q;
 	int k;
 
-	mqnic_interface_set_rx_queue_map_rss_mask(iface, 0, 0xffffffff);
-	mqnic_interface_set_rx_queue_map_app_mask(iface, 0, 0);
+	mqnic_interface_set_rx_queue_map_rss_mask(iface, priv->port->index, 0xffffffff);
+	mqnic_interface_set_rx_queue_map_app_mask(iface, priv->port->index, 0);
 
 	for (k = 0; k < priv->rx_queue_map_indir_table_size; k++) {
 		rcu_read_lock();
@@ -347,7 +355,7 @@ int mqnic_update_indir_table(struct net_device *ndev)
 		rcu_read_unlock();
 
 		if (q)
-			mqnic_interface_set_rx_queue_map_indir_table(iface, 0, k, q->index);
+			mqnic_interface_set_rx_queue_map_indir_table(iface, priv->port->index, k, q->index);
 	}
 
 	return 0;
@@ -573,7 +581,7 @@ static void mqnic_link_status_timeout(struct timer_list *timer)
 }
 
 struct net_device *mqnic_create_netdev(struct mqnic_if *interface, int index,
-		struct mqnic_port *port, struct mqnic_sched_block *sched_block)
+		struct mqnic_port *port)
 {
 	struct mqnic_dev *mdev = interface->mdev;
 	struct device *dev = interface->dev;
@@ -610,13 +618,13 @@ struct net_device *mqnic_create_netdev(struct mqnic_if *interface, int index,
 	priv->index = index;
 	priv->port = port;
 	priv->port_up = false;
-	priv->sched_block = sched_block;
+	priv->sched_port = NULL;
 
 	// associate interface resources
 	priv->if_features = interface->if_features;
 
-	priv->txq_count = min_t(u32, mqnic_res_get_count(interface->txq_res), 256);
-	priv->rxq_count = min_t(u32, mqnic_res_get_count(interface->rxq_res), num_online_cpus());
+	priv->txq_count = min_t(u32, mqnic_res_get_count(interface->txq_res) / interface->port_count, 256);
+	priv->rxq_count = min_t(u32, mqnic_res_get_count(interface->rxq_res) / interface->port_count, num_online_cpus());
 
 	priv->tx_ring_size = roundup_pow_of_two(clamp_t(u32, mqnic_num_txq_entries,
 			MQNIC_MIN_TX_RING_SZ, MQNIC_MAX_TX_RING_SZ));
diff --git a/modules/mqnic/mqnic_sched_port.c b/modules/mqnic/mqnic_sched_port.c
new file mode 100644
index 000000000..163a1d867
--- /dev/null
+++ b/modules/mqnic/mqnic_sched_port.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: BSD-2-Clause-Views
+/*
+ * Copyright (c) 2024 The Regents of the University of California
+ */
+
+#include "mqnic.h"
+
+struct mqnic_sched_port *mqnic_create_sched_port(struct mqnic_sched *sched, int index)
+{
+	struct mqnic_sched_port *port;
+
+	port = kzalloc(sizeof(*port), GFP_KERNEL);
+	if (!port)
+		return ERR_PTR(-ENOMEM);
+
+	port->sched = sched;
+
+	port->index = index;
+
+	return port;
+}
+
+void mqnic_destroy_sched_port(struct mqnic_sched_port *port)
+{
+	kfree(port);
+}
+
+int mqnic_sched_port_enable(struct mqnic_sched_port *port)
+{
+	return mqnic_scheduler_enable(port->sched);
+}
+EXPORT_SYMBOL(mqnic_sched_port_enable);
+
+void mqnic_sched_port_disable(struct mqnic_sched_port *port)
+{
+	mqnic_scheduler_disable(port->sched);
+}
+EXPORT_SYMBOL(mqnic_sched_port_disable);
+
+int mqnic_sched_port_channel_enable(struct mqnic_sched_port *port, int tc)
+{
+	return mqnic_scheduler_channel_enable(port->sched, port->index, tc);
+}
+EXPORT_SYMBOL(mqnic_sched_port_channel_enable);
+
+void mqnic_sched_port_channel_disable(struct mqnic_sched_port *port, int tc)
+{
+	mqnic_scheduler_channel_disable(port->sched, port->index, tc);
+}
+EXPORT_SYMBOL(mqnic_sched_port_channel_disable);
+
+void mqnic_sched_port_channel_set_dest(struct mqnic_sched_port *port, int tc, int val)
+{
+	mqnic_scheduler_channel_set_dest(port->sched, port->index, tc, val);
+}
+EXPORT_SYMBOL(mqnic_sched_port_channel_set_dest);
+
+int mqnic_sched_port_channel_get_dest(struct mqnic_sched_port *port, int tc)
+{
+	return mqnic_scheduler_channel_get_dest(port->sched, port->index, tc);
+}
+EXPORT_SYMBOL(mqnic_sched_port_channel_get_dest);
+
+void mqnic_sched_port_channel_set_pkt_budget(struct mqnic_sched_port *port, int tc, int val)
+{
+	mqnic_scheduler_channel_set_pkt_budget(port->sched, port->index, tc, val);
+}
+EXPORT_SYMBOL(mqnic_sched_port_channel_set_pkt_budget);
+
+int mqnic_sched_port_channel_get_pkt_budget(struct mqnic_sched_port *port, int tc)
+{
+	return mqnic_scheduler_channel_get_pkt_budget(port->sched, port->index, tc);
+}
+EXPORT_SYMBOL(mqnic_sched_port_channel_get_pkt_budget);
+
+void mqnic_sched_port_channel_set_data_budget(struct mqnic_sched_port *port, int tc, int val)
+{
+	mqnic_scheduler_channel_set_data_budget(port->sched, port->index, tc, val);
+}
+EXPORT_SYMBOL(mqnic_sched_port_channel_set_data_budget);
+
+int mqnic_sched_port_channel_get_data_budget(struct mqnic_sched_port *port, int tc)
+{
+	return mqnic_scheduler_channel_get_data_budget(port->sched, port->index, tc);
+}
+EXPORT_SYMBOL(mqnic_sched_port_channel_get_data_budget);
+
+void mqnic_sched_port_channel_set_pkt_limit(struct mqnic_sched_port *port, int tc, int val)
+{
+	mqnic_scheduler_channel_set_pkt_limit(port->sched, port->index, tc, val);
+}
+EXPORT_SYMBOL(mqnic_sched_port_channel_set_pkt_limit);
+
+int mqnic_sched_port_channel_get_pkt_limit(struct mqnic_sched_port *port, int tc)
+{
+	return mqnic_scheduler_channel_get_pkt_limit(port->sched, port->index, tc);
+}
+EXPORT_SYMBOL(mqnic_sched_port_channel_get_pkt_limit);
+
+void mqnic_sched_port_channel_set_data_limit(struct mqnic_sched_port *port, int tc, int val)
+{
+	mqnic_scheduler_channel_set_data_limit(port->sched, port->index, tc, val);
+}
+EXPORT_SYMBOL(mqnic_sched_port_channel_set_data_limit);
+
+int mqnic_sched_port_channel_get_data_limit(struct mqnic_sched_port *port, int tc)
+{
+	return mqnic_scheduler_channel_get_data_limit(port->sched, port->index, tc);
+}
+EXPORT_SYMBOL(mqnic_sched_port_channel_get_data_limit);
+
+int mqnic_sched_port_queue_enable(struct mqnic_sched_port *port, int queue)
+{
+	int ret = mqnic_scheduler_queue_enable(port->sched, queue);
+
+	if (ret)
+		return ret;
+
+	return mqnic_scheduler_queue_port_enable(port->sched, queue, port->index);
+}
+EXPORT_SYMBOL(mqnic_sched_port_queue_enable);
+
+void mqnic_sched_port_queue_disable(struct mqnic_sched_port *port, int queue)
+{
+	mqnic_scheduler_queue_port_disable(port->sched, queue, port->index);
+	mqnic_scheduler_queue_disable(port->sched, queue);
+}
+EXPORT_SYMBOL(mqnic_sched_port_queue_disable);
+
+void mqnic_sched_port_queue_set_pause(struct mqnic_sched_port *port, int queue, int val)
+{
+	mqnic_scheduler_queue_port_set_pause(port->sched, queue, port->index, val);
+}
+EXPORT_SYMBOL(mqnic_sched_port_queue_set_pause);
+
+int mqnic_sched_port_queue_get_pause(struct mqnic_sched_port *port, int queue)
+{
+	return mqnic_scheduler_queue_port_get_pause(port->sched, queue, port->index);
+}
+EXPORT_SYMBOL(mqnic_sched_port_queue_get_pause);
+
+void mqnic_sched_port_queue_set_tc(struct mqnic_sched_port *port, int queue, int val)
+{
+	mqnic_scheduler_queue_port_set_tc(port->sched, queue, port->index, val);
+}
+EXPORT_SYMBOL(mqnic_sched_port_queue_set_tc);
+
+int mqnic_sched_port_queue_get_tc(struct mqnic_sched_port *port, int queue)
+{
+	return mqnic_scheduler_queue_port_get_tc(port->sched, queue, port->index);
+}
+EXPORT_SYMBOL(mqnic_sched_port_queue_get_tc);
diff --git a/modules/mqnic/mqnic_scheduler.c b/modules/mqnic/mqnic_scheduler.c
index 5dd76e2bf..dff01eca0 100644
--- a/modules/mqnic/mqnic_scheduler.c
+++ b/modules/mqnic/mqnic_scheduler.c
@@ -21,6 +21,7 @@ struct mqnic_sched *mqnic_create_scheduler(struct mqnic_sched_block *block,
 	struct device *dev = block->dev;
 	struct mqnic_sched *sched;
 	u32 val;
+	int k;
 
 	sched = kzalloc(sizeof(*sched), GFP_KERNEL);
 	if (!sched)
@@ -48,6 +49,7 @@ struct mqnic_sched *mqnic_create_scheduler(struct mqnic_sched_block *block,
 	sched->fc_scale = 1 << ((val >> 16) & 0xff);
 
 	sched->enable_count = 0;
+	_mqnic_scheduler_disable(sched);
 
 	dev_info(dev, "Scheduler type: 0x%08x", sched->type);
 	dev_info(dev, "Scheduler offset: 0x%08x", sched->offset);
@@ -58,15 +60,31 @@
 	dev_info(dev, "Scheduler channel count: %d", sched->channel_count);
 	dev_info(dev, "Scheduler FC scale: %d", sched->fc_scale);
 
-	_mqnic_scheduler_disable(sched);
+	INIT_LIST_HEAD(&sched->sched_port_list);
+
+	for (k = 0; k < sched->port_count; k++) {
+		struct mqnic_sched_port *port = mqnic_create_sched_port(sched, k);
+
+		if (IS_ERR(port))
+			continue;
+
+		list_add_tail(&port->list, &sched->sched_port_list);
+		mqnic_interface_register_sched_port(sched->interface, port);
+	}
 
 	return sched;
 }
 
 void mqnic_destroy_scheduler(struct mqnic_sched *sched)
 {
+	struct mqnic_sched_port *port, *port_safe;
+
 	_mqnic_scheduler_disable(sched);
 
+	list_for_each_entry_safe(port, port_safe, &sched->sched_port_list, list) {
+		mqnic_interface_unregister_sched_port(sched->interface, port);
+		list_del(&port->list);
+		mqnic_destroy_sched_port(port);
+	}
+
 	kfree(sched);
 }
 
@@ -90,78 +106,90 @@ void mqnic_scheduler_disable(struct mqnic_sched *sched)
 }
 EXPORT_SYMBOL(mqnic_scheduler_disable);
 
-int mqnic_scheduler_channel_enable(struct mqnic_sched *sched, int ch)
+int mqnic_scheduler_channel_enable(struct mqnic_sched *sched, int port, int tc)
 {
+	int ch = sched->tc_count*port + tc;
 	iowrite32(1, sched->rb->regs + MQNIC_RB_SCHED_RR_REG_CH0_CTRL + ch*MQNIC_RB_SCHED_RR_REG_CH_STRIDE);
 
 	return 0;
 }
 EXPORT_SYMBOL(mqnic_scheduler_channel_enable);
 
-void mqnic_scheduler_channel_disable(struct mqnic_sched *sched, int ch)
+void mqnic_scheduler_channel_disable(struct mqnic_sched *sched, int port, int tc)
 {
+	int ch = sched->tc_count*port + tc;
 	iowrite32(0, sched->rb->regs + MQNIC_RB_SCHED_RR_REG_CH0_CTRL + ch*MQNIC_RB_SCHED_RR_REG_CH_STRIDE);
 }
 EXPORT_SYMBOL(mqnic_scheduler_channel_disable);
 
-void mqnic_scheduler_channel_set_dest(struct mqnic_sched *sched, int ch, int val)
+void mqnic_scheduler_channel_set_dest(struct mqnic_sched *sched, int port, int tc, int val)
 {
+	int ch = sched->tc_count*port + tc;
 	iowrite16(val, sched->rb->regs + MQNIC_RB_SCHED_RR_REG_CH0_FC1_DEST + ch*MQNIC_RB_SCHED_RR_REG_CH_STRIDE);
 }
 EXPORT_SYMBOL(mqnic_scheduler_channel_set_dest);
 
-int mqnic_scheduler_channel_get_dest(struct mqnic_sched *sched, int ch)
+int mqnic_scheduler_channel_get_dest(struct mqnic_sched *sched, int port, int tc)
 {
+	int ch = sched->tc_count*port + tc;
 	return ioread16(sched->rb->regs + MQNIC_RB_SCHED_RR_REG_CH0_FC1_DEST + ch*MQNIC_RB_SCHED_RR_REG_CH_STRIDE);
 }
 EXPORT_SYMBOL(mqnic_scheduler_channel_get_dest);
 
-void mqnic_scheduler_channel_set_pkt_budget(struct mqnic_sched *sched, int ch, int val)
+void mqnic_scheduler_channel_set_pkt_budget(struct mqnic_sched *sched, int port, int tc, int val)
 {
+	int ch = sched->tc_count*port + tc;
 	iowrite16(val, sched->rb->regs + MQNIC_RB_SCHED_RR_REG_CH0_FC1_PB + ch*MQNIC_RB_SCHED_RR_REG_CH_STRIDE);
 }
 EXPORT_SYMBOL(mqnic_scheduler_channel_set_pkt_budget);
 
-int mqnic_scheduler_channel_get_pkt_budget(struct mqnic_sched *sched, int ch)
+int mqnic_scheduler_channel_get_pkt_budget(struct mqnic_sched *sched, int port, int tc)
 {
+	int ch = sched->tc_count*port + tc;
 	return ioread16(sched->rb->regs + MQNIC_RB_SCHED_RR_REG_CH0_FC1_PB + ch*MQNIC_RB_SCHED_RR_REG_CH_STRIDE);
 }
 EXPORT_SYMBOL(mqnic_scheduler_channel_get_pkt_budget);
 
-void mqnic_scheduler_channel_set_data_budget(struct mqnic_sched *sched, int ch, int val)
+void mqnic_scheduler_channel_set_data_budget(struct mqnic_sched *sched, int port, int tc, int val)
 {
+	int ch = sched->tc_count*port + tc;
 	val = (val + sched->fc_scale-1) / sched->fc_scale;
 	iowrite16(val, sched->rb->regs + MQNIC_RB_SCHED_RR_REG_CH0_FC2_DB + ch*MQNIC_RB_SCHED_RR_REG_CH_STRIDE);
 }
 EXPORT_SYMBOL(mqnic_scheduler_channel_set_data_budget);
 
-int mqnic_scheduler_channel_get_data_budget(struct mqnic_sched *sched, int ch)
+int mqnic_scheduler_channel_get_data_budget(struct mqnic_sched *sched, int port, int tc)
 {
+	int ch = sched->tc_count*port + tc;
 	return (int)ioread16(sched->rb->regs + MQNIC_RB_SCHED_RR_REG_CH0_FC2_DB + ch*MQNIC_RB_SCHED_RR_REG_CH_STRIDE) * sched->fc_scale;
 }
 EXPORT_SYMBOL(mqnic_scheduler_channel_get_data_budget);
 
-void mqnic_scheduler_channel_set_pkt_limit(struct mqnic_sched *sched, int ch, int val)
+void mqnic_scheduler_channel_set_pkt_limit(struct mqnic_sched *sched, int port, int tc, int val)
 {
+	int ch = sched->tc_count*port + tc;
 	iowrite16(val, sched->rb->regs + MQNIC_RB_SCHED_RR_REG_CH0_FC2_PL + ch*MQNIC_RB_SCHED_RR_REG_CH_STRIDE);
 }
 EXPORT_SYMBOL(mqnic_scheduler_channel_set_pkt_limit);
 
-int mqnic_scheduler_channel_get_pkt_limit(struct mqnic_sched *sched, int ch)
+int mqnic_scheduler_channel_get_pkt_limit(struct mqnic_sched *sched, int port, int tc)
 {
+	int ch = sched->tc_count*port + tc;
 	return ioread16(sched->rb->regs + MQNIC_RB_SCHED_RR_REG_CH0_FC2_PL + ch*MQNIC_RB_SCHED_RR_REG_CH_STRIDE);
 }
 EXPORT_SYMBOL(mqnic_scheduler_channel_get_pkt_limit);
 
-void mqnic_scheduler_channel_set_data_limit(struct mqnic_sched *sched, int ch, int val)
+void mqnic_scheduler_channel_set_data_limit(struct mqnic_sched *sched, int port, int tc, int val)
 {
+	int ch = sched->tc_count*port + tc;
 	val = (val + sched->fc_scale-1) / sched->fc_scale;
 	iowrite32(val, sched->rb->regs + MQNIC_RB_SCHED_RR_REG_CH0_FC3_DL + ch*MQNIC_RB_SCHED_RR_REG_CH_STRIDE);
 }
 EXPORT_SYMBOL(mqnic_scheduler_channel_set_data_limit);
 
-int mqnic_scheduler_channel_get_data_limit(struct mqnic_sched *sched, int ch)
+int mqnic_scheduler_channel_get_data_limit(struct mqnic_sched *sched, int port, int tc)
 {
+	int ch = sched->tc_count*port + tc;
 	return (int)ioread32(sched->rb->regs + MQNIC_RB_SCHED_RR_REG_CH0_FC3_DL + ch*MQNIC_RB_SCHED_RR_REG_CH_STRIDE) * sched->fc_scale;
 }
 EXPORT_SYMBOL(mqnic_scheduler_channel_get_data_limit);
@@ -188,6 +216,44 @@ EXPORT_SYMBOL(mqnic_scheduler_queue_set_pause);
 
 int mqnic_scheduler_queue_get_pause(struct mqnic_sched *sched, int queue)
 {
-	return !!(ioread32(sched->hw_addr + sched->queue_stride*queue) & (1 << 7));
+	return !!(ioread32(sched->hw_addr + sched->queue_stride*queue) & MQNIC_SCHED_RR_QUEUE_PAUSE);
 }
 EXPORT_SYMBOL(mqnic_scheduler_queue_get_pause);
+
+int mqnic_scheduler_queue_port_enable(struct mqnic_sched *sched, int queue, int port)
+{
+	iowrite32(MQNIC_SCHED_RR_CMD_SET_PORT_ENABLE | (port << 8) | 1, sched->hw_addr + sched->queue_stride*queue);
+
+	return 0;
+}
+EXPORT_SYMBOL(mqnic_scheduler_queue_port_enable);
+
+void mqnic_scheduler_queue_port_disable(struct mqnic_sched *sched, int queue, int port)
+{
+	iowrite32(MQNIC_SCHED_RR_CMD_SET_PORT_ENABLE | (port << 8) | 0, sched->hw_addr + sched->queue_stride*queue);
+}
+EXPORT_SYMBOL(mqnic_scheduler_queue_port_disable);
+
+void mqnic_scheduler_queue_port_set_pause(struct mqnic_sched *sched, int queue, int port, int val)
+{
+	iowrite32(MQNIC_SCHED_RR_CMD_SET_PORT_PAUSE | (port << 8) | (val ? 1 : 0), sched->hw_addr + sched->queue_stride*queue);
+}
+EXPORT_SYMBOL(mqnic_scheduler_queue_port_set_pause);
+
+int mqnic_scheduler_queue_port_get_pause(struct mqnic_sched *sched, int queue, int port)
+{
+	return !!((ioread32(sched->hw_addr + sched->queue_stride*queue) >> port*8) & MQNIC_SCHED_RR_PORT_PAUSE);
+}
+EXPORT_SYMBOL(mqnic_scheduler_queue_port_get_pause);
+
+void mqnic_scheduler_queue_port_set_tc(struct mqnic_sched *sched, int queue, int port, int val)
+{
+	iowrite32(MQNIC_SCHED_RR_CMD_SET_PORT_TC | (port << 8) | (val & 0x7), sched->hw_addr + sched->queue_stride*queue);
+}
+EXPORT_SYMBOL(mqnic_scheduler_queue_port_set_tc);
+
+int mqnic_scheduler_queue_port_get_tc(struct mqnic_sched *sched, int queue, int port)
+{
+	return (ioread32(sched->hw_addr + sched->queue_stride*queue) >> port*8) & MQNIC_SCHED_RR_PORT_TC;
+}
+EXPORT_SYMBOL(mqnic_scheduler_queue_port_get_tc);