diff --git a/Documentation/ABI/testing/sysfs-driver-ufs b/Documentation/ABI/testing/sysfs-driver-ufs
index a90612ab5780..3c422aac778b 100644
--- a/Documentation/ABI/testing/sysfs-driver-ufs
+++ b/Documentation/ABI/testing/sysfs-driver-ufs
@@ -1768,3 +1768,26 @@ Description:
 		====================   ===========================
 
 		The attribute is read only.
+
+What:		/sys/bus/platform/drivers/ufshcd/*/dme_qos_notification
+What:		/sys/bus/platform/devices/*.ufs/dme_qos_notification
+Date:		March 2026
+Contact:	Can Guo <can.guo@oss.qualcomm.com>
+Description:
+		This attribute reports and clears pending DME (Device Management
+		Entity) Quality of Service (QoS) notifications. This attribute
+		is a bitfield with the following bit assignments:
+
+		Bit	Description
+		===	======================================
+		0	DME QoS Monitor has been reset by host
+		1	QoS from TX is detected
+		2	QoS from RX is detected
+		3	QoS from PA_INIT is detected
+
+		Reading this attribute returns the pending DME QoS notification
+		bits. Writing '0' to this attribute clears pending DME QoS
+		notification bits. Writing any non-zero value is invalid and
+		will be rejected.
+
+		The attribute is read/write.
diff --git a/Documentation/devicetree/bindings/ufs/qcom,sc7180-ufshc.yaml b/Documentation/devicetree/bindings/ufs/qcom,sc7180-ufshc.yaml
index d94ef4e6b85a..3c407426d697 100644
--- a/Documentation/devicetree/bindings/ufs/qcom,sc7180-ufshc.yaml
+++ b/Documentation/devicetree/bindings/ufs/qcom,sc7180-ufshc.yaml
@@ -15,6 +15,7 @@ select:
     compatible:
       contains:
         enum:
+          - qcom,milos-ufshc
           - qcom,msm8998-ufshc
           - qcom,qcs8300-ufshc
           - qcom,sa8775p-ufshc
@@ -31,21 +32,28 @@ select:
 
 properties:
   compatible:
-    items:
-      - enum:
-          - qcom,msm8998-ufshc
-          - qcom,qcs8300-ufshc
-          - qcom,sa8775p-ufshc
-          - qcom,sc7180-ufshc
-          - qcom,sc7280-ufshc
-          - qcom,sc8180x-ufshc
-          - qcom,sc8280xp-ufshc
-          - qcom,sm8250-ufshc
-          - qcom,sm8350-ufshc
-          - qcom,sm8450-ufshc
-          - qcom,sm8550-ufshc
-      - const: qcom,ufshc
-      - const: jedec,ufs-2.0
+    oneOf:
+      - items:
+          - enum:
+              - qcom,x1e80100-ufshc
+          - const: qcom,sm8550-ufshc
+          - const: qcom,ufshc
+      - items:
+          - enum:
+              - qcom,milos-ufshc
+              - qcom,msm8998-ufshc
+              - qcom,qcs8300-ufshc
+              - qcom,sa8775p-ufshc
+              - qcom,sc7180-ufshc
+              - qcom,sc7280-ufshc
+              - qcom,sc8180x-ufshc
+              - qcom,sc8280xp-ufshc
+              - qcom,sm8250-ufshc
+              - qcom,sm8350-ufshc
+              - qcom,sm8450-ufshc
+              - qcom,sm8550-ufshc
+          - const: qcom,ufshc
+          - const: jedec,ufs-2.0
 
   reg:
     maxItems: 1
diff --git a/Documentation/devicetree/bindings/ufs/qcom,sm8650-ufshc.yaml b/Documentation/devicetree/bindings/ufs/qcom,sm8650-ufshc.yaml
index cea84ab2204f..f28641c6e68f 100644
--- a/Documentation/devicetree/bindings/ufs/qcom,sm8650-ufshc.yaml
+++ b/Documentation/devicetree/bindings/ufs/qcom,sm8650-ufshc.yaml
@@ -15,6 +15,7 @@ select:
     compatible:
       contains:
         enum:
+          - qcom,eliza-ufshc
           - qcom,kaanapali-ufshc
           - qcom,sm8650-ufshc
           - qcom,sm8750-ufshc
@@ -25,6 +26,7 @@ properties:
   compatible:
     items:
       - enum:
+          - qcom,eliza-ufshc
           - qcom,kaanapali-ufshc
           - qcom,sm8650-ufshc
           - qcom,sm8750-ufshc
@@ -66,6 +68,18 @@ required:
 
 allOf:
   - $ref: qcom,ufs-common.yaml
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - qcom,eliza-ufshc
+    then:
+      properties:
+        reg:
+          minItems: 2
+        reg-names:
+          minItems: 2
 
 unevaluatedProperties: false
 
diff --git a/Documentation/devicetree/bindings/ufs/rockchip,rk3576-ufshc.yaml b/Documentation/devicetree/bindings/ufs/rockchip,rk3576-ufshc.yaml
index c7d17cf4dc42..e738153a309c 100644
--- a/Documentation/devicetree/bindings/ufs/rockchip,rk3576-ufshc.yaml
+++ b/Documentation/devicetree/bindings/ufs/rockchip,rk3576-ufshc.yaml
@@ -41,7 +41,7 @@ properties:
     maxItems: 1
 
   resets:
-    maxItems: 4
+    maxItems: 5
 
   reset-names:
     items:
@@ -49,6 +49,7 @@ properties:
       - const: sys
       - const: ufs
       - const: grf
+      - const: mphy
 
   reset-gpios:
     maxItems: 1
@@ -98,8 +99,8 @@ examples:
             interrupts = <GIC_SPI 361 IRQ_TYPE_LEVEL_HIGH>;
             power-domains = <&power RK3576_PD_USB>;
             resets = <&cru SRST_A_UFS_BIU>, <&cru SRST_A_UFS_SYS>, <&cru SRST_A_UFS>,
-                     <&cru SRST_P_UFS_GRF>;
-            reset-names = "biu", "sys", "ufs", "grf";
+                     <&cru SRST_P_UFS_GRF>, <&cru SRST_MPHY_INIT>;
+            reset-names = "biu", "sys", "ufs", "grf", "mphy";
             reset-gpios = <&gpio4 RK_PD0 GPIO_ACTIVE_LOW>;
         };
     };
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index e00b87acf481..9aec5d80117f 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -3925,6 +3925,7 @@ static const struct target_core_fabric_ops srpt_template = {
 	.tfc_wwn_attrs			= srpt_wwn_attrs,
 	.tfc_tpg_attrib_attrs		= srpt_tpg_attrib_attrs,
 
+	.default_compl_type		= TARGET_QUEUE_COMPL,
 	.default_submit_type		= TARGET_DIRECT_SUBMIT,
 	.direct_submit_supp		= 1,
 };
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index da6599ae3d0d..5304d2febd63 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -1632,8 +1632,8 @@ static bool __init blogic_rdconfig(struct blogic_adapter *adapter)
 	/*
 	   Initialize the Host Adapter Full Model Name from the Model Name.
 	 */
-	strcpy(adapter->full_model, "BusLogic ");
-	strcat(adapter->full_model, adapter->model);
+	scnprintf(adapter->full_model, sizeof(adapter->full_model),
+		  "BusLogic %s", adapter->model);
 	/*
 	   Select an appropriate value for the Tagged Queue Depth either from a
 	   BusLogic Driver Options specification, or based on whether this Host
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 19d0884479a2..f811ce473c2a 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1151,6 +1151,7 @@ config SCSI_LPFC
 	depends on NVME_TARGET_FC || NVME_TARGET_FC=n
 	depends on NVME_FC || NVME_FC=n
 	select CRC_T10DIF
+	select CRC32
 	select IRQ_POLL
 	help
           This lpfc driver supports the Emulex LightPulse
diff --git a/drivers/scsi/elx/efct/efct_lio.c b/drivers/scsi/elx/efct/efct_lio.c
index d6e35ee8fee0..67d686dd6fb3 100644
--- a/drivers/scsi/elx/efct/efct_lio.c
+++ b/drivers/scsi/elx/efct/efct_lio.c
@@ -1612,6 +1612,7 @@ static const struct target_core_fabric_ops efct_lio_ops = {
 	.sess_get_initiator_sid		= NULL,
 	.tfc_tpg_base_attrs		= efct_lio_tpg_attrs,
 	.tfc_tpg_attrib_attrs           = efct_lio_tpg_attrib_attrs,
+	.default_compl_type		= TARGET_QUEUE_COMPL,
 	.default_submit_type		= TARGET_DIRECT_SUBMIT,
 	.direct_submit_supp		= 1,
 };
@@ -1650,6 +1651,7 @@ static const struct target_core_fabric_ops efct_lio_npiv_ops = {
 	.tfc_tpg_base_attrs		= efct_lio_npiv_tpg_attrs,
 	.tfc_tpg_attrib_attrs		= efct_lio_npiv_tpg_attrib_attrs,
 
+	.default_compl_type		= TARGET_QUEUE_COMPL,
 	.default_submit_type		= TARGET_DIRECT_SUBMIT,
 	.direct_submit_supp		= 1,
 };
diff --git a/drivers/scsi/esas2r/esas2r_log.c b/drivers/scsi/esas2r/esas2r_log.c
index d6c87a0bae09..46f489b2263c 100644
--- a/drivers/scsi/esas2r/esas2r_log.c
+++ b/drivers/scsi/esas2r/esas2r_log.c
@@ -101,11 +101,6 @@ static const char *translate_esas2r_event_level_to_kernel(const long level)
 	}
 }
 
-#pragma GCC diagnostic push
-#ifndef __clang__
-#pragma GCC diagnostic ignored "-Wsuggest-attribute=format"
-#endif
-
 /*
  * the master logging function.  this function will format the message as
  * outlined by the formatting string, the input device information and the
@@ -118,10 +113,9 @@ static const char *translate_esas2r_event_level_to_kernel(const long level)
  *
  * @return 0 on success, or -1 if an error occurred.
  */
-static int esas2r_log_master(const long level,
-			     const struct device *dev,
-			     const char *format,
-			     va_list args)
+static __printf(3, 0)
+int esas2r_log_master(const long level, const struct device *dev,
+		      const char *format, va_list args)
 {
 	if (level <= event_log_level) {
 		unsigned long flags = 0;
@@ -175,8 +169,6 @@ static int esas2r_log_master(const long level,
 	return 0;
 }
 
-#pragma GCC diagnostic pop
-
 /*
  * formats and logs a message to the system log.
  *
diff --git a/drivers/scsi/fnic/fdls_disc.c b/drivers/scsi/fnic/fdls_disc.c
index 455426564ca0..554dea767885 100644
--- a/drivers/scsi/fnic/fdls_disc.c
+++ b/drivers/scsi/fnic/fdls_disc.c
@@ -4613,7 +4613,7 @@ void fnic_fdls_disc_start(struct fnic_iport_s *iport)
 	if (!iport->usefip) {
 		if (iport->flags & FNIC_FIRST_LINK_UP) {
 			spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
-			fnic_scsi_fcpio_reset(iport->fnic);
+			fnic_fcpio_reset(iport->fnic);
 			spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
 
 			iport->flags &= ~FNIC_FIRST_LINK_UP;
@@ -5072,7 +5072,7 @@ void fnic_fdls_link_down(struct fnic_iport_s *iport)
 	iport->fabric.flags = 0;
 
 	spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
-	fnic_scsi_fcpio_reset(iport->fnic);
+	fnic_fcpio_reset(iport->fnic);
 	spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
 	list_for_each_entry_safe(tport, next, &iport->tport_list, links) {
 		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
diff --git a/drivers/scsi/fnic/fip.c b/drivers/scsi/fnic/fip.c
index ccd0da7d490f..132f00512ee1 100644
--- a/drivers/scsi/fnic/fip.c
+++ b/drivers/scsi/fnic/fip.c
@@ -737,7 +737,7 @@ void fnic_work_on_fip_timer(struct work_struct *work)
 		if (memcmp(iport->selected_fcf.fcf_mac, zmac, ETH_ALEN) != 0) {
 
 			if (iport->flags & FNIC_FIRST_LINK_UP) {
-				fnic_scsi_fcpio_reset(iport->fnic);
+				fnic_fcpio_reset(iport->fnic);
 				iport->flags &= ~FNIC_FIRST_LINK_UP;
 			}
 
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 42237eb3222f..8724d64f2525 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -30,7 +30,7 @@
 
 #define DRV_NAME		"fnic"
 #define DRV_DESCRIPTION		"Cisco FCoE HBA Driver"
-#define DRV_VERSION		"1.8.0.2"
+#define DRV_VERSION		"1.8.0.3"
 #define PFX			DRV_NAME ": "
 #define DFX                     DRV_NAME "%d: "
 
@@ -438,6 +438,7 @@ struct fnic {
 	struct list_head tx_queue;
 	mempool_t *frame_pool;
 	mempool_t *frame_elem_pool;
+	mempool_t *frame_recv_pool;
 	struct work_struct tport_work;
 	struct list_head tport_event_list;
 
@@ -512,7 +513,6 @@ int fnic_host_reset(struct Scsi_Host *shost);
 void fnic_reset(struct Scsi_Host *shost);
 int fnic_issue_fc_host_lip(struct Scsi_Host *shost);
 void fnic_get_host_port_state(struct Scsi_Host *shost);
-void fnic_scsi_fcpio_reset(struct fnic *fnic);
 int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do, unsigned int cq_index);
 int fnic_wq_cmpl_handler(struct fnic *fnic, int);
 int fnic_flogi_reg_handler(struct fnic *fnic, u32);
@@ -541,7 +541,8 @@ fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags)
 }
 void __fnic_set_state_flags(struct fnic *, unsigned long, unsigned long);
 void fnic_dump_fchost_stats(struct Scsi_Host *, struct fc_host_statistics *);
-void fnic_free_txq(struct list_head *head);
+void fnic_free_txq(struct fnic *fnic);
+void fnic_free_rxq(struct fnic *fnic);
 int fnic_get_desc_by_devid(struct pci_dev *pdev, char **desc,
 						   char **subsys_desc);
 void fnic_fdls_link_status_change(struct fnic *fnic, int linkup);
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 405b341b73d7..063eb864a5cd 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -291,7 +291,7 @@ void fnic_handle_frame(struct work_struct *work)
 		if (fnic->stop_rx_link_events) {
 			list_del(&cur_frame->links);
 			spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
-			kfree(cur_frame->fp);
+			mempool_free(cur_frame->fp, fnic->frame_recv_pool);
 			mempool_free(cur_frame, fnic->frame_elem_pool);
 			return;
 		}
@@ -317,7 +317,7 @@ void fnic_handle_frame(struct work_struct *work)
 		fnic_fdls_recv_frame(&fnic->iport, cur_frame->fp,
 							 cur_frame->frame_len, fchdr_offset);
 
-		kfree(cur_frame->fp);
+		mempool_free(cur_frame->fp, fnic->frame_recv_pool);
 		mempool_free(cur_frame, fnic->frame_elem_pool);
 	}
 	spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
@@ -337,8 +337,8 @@ void fnic_handle_fip_frame(struct work_struct *work)
 		if (fnic->stop_rx_link_events) {
 			list_del(&cur_frame->links);
 			spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
-			kfree(cur_frame->fp);
-			kfree(cur_frame);
+			mempool_free(cur_frame->fp, fnic->frame_recv_pool);
+			mempool_free(cur_frame, fnic->frame_elem_pool);
 			return;
 		}
 
@@ -355,8 +355,8 @@ void fnic_handle_fip_frame(struct work_struct *work)
 		list_del(&cur_frame->links);
 
 		if (fdls_fip_recv_frame(fnic, cur_frame->fp)) {
-			kfree(cur_frame->fp);
-			kfree(cur_frame);
+			mempool_free(cur_frame->fp, fnic->frame_recv_pool);
+			mempool_free(cur_frame, fnic->frame_elem_pool);
 		}
 	}
 	spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
@@ -375,10 +375,10 @@ static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, void *fp)
 
 	eh = (struct ethhdr *) fp;
 	if ((eh->h_proto == cpu_to_be16(ETH_P_FIP)) && (fnic->iport.usefip)) {
-		fip_fr_elem = (struct fnic_frame_list *)
-			kzalloc_obj(struct fnic_frame_list, GFP_ATOMIC);
+		fip_fr_elem = mempool_alloc(fnic->frame_elem_pool, GFP_ATOMIC);
 		if (!fip_fr_elem)
 			return 0;
+		memset(fip_fr_elem, 0, sizeof(struct fnic_frame_list));
 		fip_fr_elem->fp = fp;
 		spin_lock_irqsave(&fnic->fnic_lock, flags);
 		list_add_tail(&fip_fr_elem->links, &fnic->fip_frame_queue);
@@ -519,13 +519,13 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
 
 	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 
-	frame_elem = mempool_alloc(fnic->frame_elem_pool,
-					GFP_ATOMIC | __GFP_ZERO);
+	frame_elem = mempool_alloc(fnic->frame_elem_pool, GFP_ATOMIC);
 	if (!frame_elem) {
 		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
 				 "Failed to allocate memory for frame elem");
 		goto drop;
 	}
+	memset(frame_elem, 0, sizeof(struct fnic_frame_list));
 	frame_elem->fp = fp;
 	frame_elem->rx_ethhdr_stripped = ethhdr_stripped;
 	frame_elem->frame_len = bytes_written;
@@ -538,7 +538,7 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
 	return;
 
 drop:
-	kfree(fp);
+	mempool_free(fp, fnic->frame_recv_pool);
 }
 
 static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
@@ -591,7 +591,7 @@ int fnic_alloc_rq_frame(struct vnic_rq *rq)
 	int ret;
 
 	len = FNIC_FRAME_HT_ROOM;
-	buf = kmalloc(len, GFP_ATOMIC);
+	buf = mempool_alloc(fnic->frame_recv_pool, GFP_ATOMIC);
 	if (!buf) {
 		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
 					 "Unable to allocate RQ buffer of size: %d\n", len);
@@ -609,7 +609,7 @@ int fnic_alloc_rq_frame(struct vnic_rq *rq)
 	fnic_queue_rq_desc(rq, buf, pa, len);
 	return 0;
 free_buf:
-	kfree(buf);
+	mempool_free(buf, fnic->frame_recv_pool);
 	return ret;
 }
 
@@ -621,7 +621,7 @@ void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
 	dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
 			 DMA_FROM_DEVICE);
 
-	kfree(rq_buf);
+	mempool_free(rq_buf, fnic->frame_recv_pool);
 	buf->os_buf = NULL;
 }
 
@@ -704,13 +704,13 @@ fdls_send_fcoe_frame(struct fnic *fnic, void *frame, int frame_size,
 	 */
 	if ((fnic->state != FNIC_IN_FC_MODE)
 		&& (fnic->state != FNIC_IN_ETH_MODE)) {
-		frame_elem = mempool_alloc(fnic->frame_elem_pool,
-						GFP_ATOMIC | __GFP_ZERO);
+		frame_elem = mempool_alloc(fnic->frame_elem_pool, GFP_ATOMIC);
 		if (!frame_elem) {
 			FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
 				 "Failed to allocate memory for frame elem");
 			return -ENOMEM;
 		}
+		memset(frame_elem, 0, sizeof(struct fnic_frame_list));
 
 		FNIC_FCS_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
 			"Queueing FC frame: sid/did/type/oxid = 0x%x/0x%x/0x%x/0x%x\n",
@@ -836,14 +836,34 @@ fnic_fdls_register_portid(struct fnic_iport_s *iport, u32 port_id,
 	return 0;
 }
 
-void fnic_free_txq(struct list_head *head)
+void fnic_free_txq(struct fnic *fnic)
+{
+	struct fnic_frame_list *cur_frame, *next;
+
+	list_for_each_entry_safe(cur_frame, next, &fnic->tx_queue, links) {
+		list_del(&cur_frame->links);
+		mempool_free(cur_frame->fp, fnic->frame_pool);
+		mempool_free(cur_frame, fnic->frame_elem_pool);
+	}
+}
+
+void fnic_free_rxq(struct fnic *fnic)
 {
 	struct fnic_frame_list *cur_frame, *next;
 
-	list_for_each_entry_safe(cur_frame, next, head, links) {
+	list_for_each_entry_safe(cur_frame, next, &fnic->frame_queue, links) {
 		list_del(&cur_frame->links);
-		kfree(cur_frame->fp);
-		kfree(cur_frame);
+		mempool_free(cur_frame->fp, fnic->frame_recv_pool);
+		mempool_free(cur_frame, fnic->frame_elem_pool);
+	}
+
+	if (fnic->config.flags & VFCF_FIP_CAPABLE) {
+		list_for_each_entry_safe(cur_frame, next,
+				&fnic->fip_frame_queue, links) {
+			list_del(&cur_frame->links);
+			mempool_free(cur_frame->fp, fnic->frame_recv_pool);
+			mempool_free(cur_frame, fnic->frame_elem_pool);
+		}
 	}
 }
 
@@ -898,7 +918,7 @@ void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
 	dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
 			 DMA_TO_DEVICE);
 
-	kfree(buf->os_buf);
+	mempool_free(buf->os_buf, fnic->frame_pool);
 	buf->os_buf = NULL;
 }
 
@@ -1108,3 +1128,53 @@ void fnic_reset_work_handler(struct work_struct *work)
 	spin_unlock_irqrestore(&reset_fnic_list_lock,
 						   reset_fnic_list_lock_flags);
 }
+
+void fnic_fcpio_reset(struct fnic *fnic)
+{
+	unsigned long flags;
+	enum fnic_state old_state;
+	struct fnic_iport_s *iport = &fnic->iport;
+	DECLARE_COMPLETION_ONSTACK(fw_reset_done);
+	int time_remain;
+
+	/* issue fw reset */
+	spin_lock_irqsave(&fnic->fnic_lock, flags);
+	if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
+		/* fw reset is in progress, poll for its completion */
+		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+			  "fnic is in unexpected state: %d for fw_reset\n",
+			  fnic->state);
+		return;
+	}
+
+	old_state = fnic->state;
+	fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
+
+	fnic_update_mac_locked(fnic, iport->hwmac);
+	fnic->fw_reset_done = &fw_reset_done;
+	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+				"Issuing fw reset\n");
+	if (fnic_fw_reset_handler(fnic)) {
+		spin_lock_irqsave(&fnic->fnic_lock, flags);
+		if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
+			fnic->state = old_state;
+		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+	} else {
+		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+					  "Waiting for fw completion\n");
+		time_remain = wait_for_completion_timeout(&fw_reset_done,
+						  msecs_to_jiffies(FNIC_FW_RESET_TIMEOUT));
+		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+					  "Woken up after fw completion timeout\n");
+		if (time_remain == 0) {
+			FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+				  "FW reset completion timed out after %d ms\n",
+				  FNIC_FW_RESET_TIMEOUT);
+		}
+		atomic64_inc(&fnic->fnic_stats.reset_stats.fw_reset_timeouts);
+	}
+	fnic->fw_reset_done = NULL;
+}
diff --git a/drivers/scsi/fnic/fnic_fdls.h b/drivers/scsi/fnic/fnic_fdls.h
index 531d0b37e450..e2959120c4f9 100644
--- a/drivers/scsi/fnic/fnic_fdls.h
+++ b/drivers/scsi/fnic/fnic_fdls.h
@@ -410,6 +410,7 @@ void fnic_fdls_add_tport(struct fnic_iport_s *iport,
 void fnic_fdls_remove_tport(struct fnic_iport_s *iport,
 			    struct fnic_tport_s *tport,
 			    unsigned long flags);
+void fnic_fcpio_reset(struct fnic *fnic);
 
 /* fip.c */
 void fnic_fcoe_send_vlan_req(struct fnic *fnic);
@@ -422,7 +423,6 @@ void fnic_handle_fip_timer(struct timer_list *t);
 extern void fdls_fabric_timer_callback(struct timer_list *t);
 
 /* fnic_scsi.c */
-void fnic_scsi_fcpio_reset(struct fnic *fnic);
 extern void fdls_fabric_timer_callback(struct timer_list *t);
 void fnic_rport_exch_reset(struct fnic *fnic, u32 fcid);
 int fnic_fdls_register_portid(struct fnic_iport_s *iport, u32 port_id,
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 8b551b79e087..24d62c0874ac 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -40,6 +40,7 @@ static struct kmem_cache *fnic_sgl_cache[FNIC_SGL_NUM_CACHES];
 static struct kmem_cache *fnic_io_req_cache;
 static struct kmem_cache *fdls_frame_cache;
 static struct kmem_cache *fdls_frame_elem_cache;
+static struct kmem_cache *fdls_frame_recv_cache;
 static LIST_HEAD(fnic_list);
 static DEFINE_SPINLOCK(fnic_list_lock);
 static DEFINE_IDA(fnic_ida);
@@ -554,6 +555,7 @@ static int fnic_cleanup(struct fnic *fnic)
 	mempool_destroy(fnic->io_req_pool);
 	mempool_destroy(fnic->frame_pool);
 	mempool_destroy(fnic->frame_elem_pool);
+	mempool_destroy(fnic->frame_recv_pool);
 	for (i = 0; i < FNIC_SGL_NUM_CACHES; i++)
 		mempool_destroy(fnic->io_sgl_pool[i]);
 
@@ -928,6 +930,14 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	}
 	fnic->frame_elem_pool = pool;
 
+	pool = mempool_create_slab_pool(FDLS_MIN_FRAMES,
+						fdls_frame_recv_cache);
+	if (!pool) {
+		err = -ENOMEM;
+		goto err_out_fdls_frame_recv_pool;
+	}
+	fnic->frame_recv_pool = pool;
+
 	/* setup vlan config, hw inserts vlan header */
 	fnic->vlan_hw_insert = 1;
 	fnic->vlan_id = 0;
@@ -1085,6 +1095,8 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	}
 	vnic_dev_notify_unset(fnic->vdev);
 err_out_fnic_notify_set:
+	mempool_destroy(fnic->frame_recv_pool);
+err_out_fdls_frame_recv_pool:
 	mempool_destroy(fnic->frame_elem_pool);
 err_out_fdls_frame_elem_pool:
 	mempool_destroy(fnic->frame_pool);
@@ -1157,7 +1169,6 @@ static void fnic_remove(struct pci_dev *pdev)
 		timer_delete_sync(&fnic->enode_ka_timer);
 		timer_delete_sync(&fnic->vn_ka_timer);
 
-		fnic_free_txq(&fnic->fip_frame_queue);
 		fnic_fcoe_reset_vlans(fnic);
 	}
 
@@ -1177,8 +1188,8 @@ static void fnic_remove(struct pci_dev *pdev)
 	list_del(&fnic->list);
 	spin_unlock_irqrestore(&fnic_list_lock, flags);
 
-	fnic_free_txq(&fnic->frame_queue);
-	fnic_free_txq(&fnic->tx_queue);
+	fnic_free_rxq(fnic);
+	fnic_free_txq(fnic);
 
 	vnic_dev_notify_unset(fnic->vdev);
 	fnic_free_intr(fnic);
@@ -1287,6 +1298,15 @@ static int __init fnic_init_module(void)
 		goto err_create_fdls_frame_cache_elem;
 	}
 
+	fdls_frame_recv_cache = kmem_cache_create("fdls_frame_recv",
+						  FNIC_FRAME_HT_ROOM,
+						  0, SLAB_HWCACHE_ALIGN, NULL);
+	if (!fdls_frame_recv_cache) {
+		pr_err("fnic fdls frame recv cache create failed\n");
+		err = -ENOMEM;
+		goto err_create_fdls_frame_recv_cache;
+	}
+
 	fnic_event_queue =
 		alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, "fnic_event_wq");
 	if (!fnic_event_queue) {
@@ -1339,6 +1359,8 @@ static int __init fnic_init_module(void)
 	if (pc_rscn_handling_feature_flag == PC_RSCN_HANDLING_FEATURE_ON)
 		destroy_workqueue(reset_fnic_work_queue);
 err_create_reset_fnic_workq:
+	kmem_cache_destroy(fdls_frame_recv_cache);
+err_create_fdls_frame_recv_cache:
 	destroy_workqueue(fnic_event_queue);
 err_create_fnic_workq:
 	kmem_cache_destroy(fdls_frame_elem_cache);
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 29d7aca06958..6ee3c559e129 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -471,7 +471,6 @@ enum scsi_qc_status fnic_queuecommand(struct Scsi_Host *shost,
 	int sg_count = 0;
 	unsigned long flags = 0;
 	unsigned long ptr;
-	int io_lock_acquired = 0;
 	uint16_t hwq = 0;
 	struct fnic_tport_s *tport = NULL;
 	struct rport_dd_data_s *rdd_data;
@@ -636,7 +635,6 @@ enum scsi_qc_status fnic_queuecommand(struct Scsi_Host *shost,
 	spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
 
 	/* initialize rest of io_req */
-	io_lock_acquired = 1;
 	io_req->port_id = rport->port_id;
 	io_req->start_time = jiffies;
 	fnic_priv(sc)->state = FNIC_IOREQ_CMD_PENDING;
@@ -689,6 +687,9 @@ enum scsi_qc_status fnic_queuecommand(struct Scsi_Host *shost,
 		/* REVISIT: Use per IO lock in the final code */
 		fnic_priv(sc)->flags |= FNIC_IO_ISSUED;
 	}
+
+	spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
+
 out:
 	cmd_trace = ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 |
 			(u64)sc->cmnd[8] << 32 | (u64)sc->cmnd[2] << 24 |
@@ -699,10 +700,6 @@ enum scsi_qc_status fnic_queuecommand(struct Scsi_Host *shost,
 		   mqtag, sc, io_req, sg_count, cmd_trace,
 		   fnic_flags_and_state(sc));
 
-	/* if only we issued IO, will we have the io lock */
-	if (io_lock_acquired)
-		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
-
 	atomic_dec(&fnic->in_flight);
 	atomic_dec(&tport->in_flight);
 
@@ -777,7 +774,7 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
 	 */
 	if (ret) {
 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-		fnic_free_txq(&fnic->tx_queue);
+		fnic_free_txq(fnic);
 		goto reset_cmpl_handler_end;
 	}
 
@@ -1972,15 +1969,11 @@ void fnic_scsi_unload(struct fnic *fnic)
 	 */
 	spin_lock_irqsave(&fnic->fnic_lock, flags);
 	fnic->iport.state = FNIC_IPORT_STATE_LINK_WAIT;
-	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-
-	if (fdls_get_state(&fnic->iport.fabric) != FDLS_STATE_INIT)
-		fnic_scsi_fcpio_reset(fnic);
-
-	spin_lock_irqsave(&fnic->fnic_lock, flags);
 	fnic->in_remove = 1;
 	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 
+	fnic_fcpio_reset(fnic);
+
 	fnic_flush_tport_event_list(fnic);
 	fnic_delete_fcp_tports(fnic);
 }
@@ -3040,54 +3033,3 @@ int fnic_eh_host_reset_handler(struct scsi_cmnd *sc)
 	ret = fnic_host_reset(shost);
 	return ret;
 }
-
-
-void fnic_scsi_fcpio_reset(struct fnic *fnic)
-{
-	unsigned long flags;
-	enum fnic_state old_state;
-	struct fnic_iport_s *iport = &fnic->iport;
-	DECLARE_COMPLETION_ONSTACK(fw_reset_done);
-	int time_remain;
-
-	/* issue fw reset */
-	spin_lock_irqsave(&fnic->fnic_lock, flags);
-	if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
-		/* fw reset is in progress, poll for its completion */
-		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-		FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
-			  "fnic is in unexpected state: %d for fw_reset\n",
-			  fnic->state);
-		return;
-	}
-
-	old_state = fnic->state;
-	fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
-
-	fnic_update_mac_locked(fnic, iport->hwmac);
-	fnic->fw_reset_done = &fw_reset_done;
-	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-
-	FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
-				  "Issuing fw reset\n");
-	if (fnic_fw_reset_handler(fnic)) {
-		spin_lock_irqsave(&fnic->fnic_lock, flags);
-		if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
-			fnic->state = old_state;
-		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-	} else {
-		FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
-					  "Waiting for fw completion\n");
-		time_remain = wait_for_completion_timeout(&fw_reset_done,
-						  msecs_to_jiffies(FNIC_FW_RESET_TIMEOUT));
-		FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
-					  "Woken up after fw completion timeout\n");
-		if (time_remain == 0) {
-			FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
-				  "FW reset completion timed out after %d ms)\n",
-				  FNIC_FW_RESET_TIMEOUT);
-		}
-		atomic64_inc(&fnic->fnic_stats.reset_stats.fw_reset_timeouts);
-	}
-	fnic->fw_reset_done = NULL;
-}
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 30a9c6612651..00e4b59ff711 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -1326,7 +1326,7 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
 
 	if (sts && !wait_for_completion_timeout(&completion,
 		HISI_SAS_WAIT_PHYUP_TIMEOUT)) {
-		dev_warn(dev, "phy%d wait phyup timed out for func %d\n",
+		dev_warn(dev, "phy%d wait phyup timed out for func %u\n",
 			 phy_no, func);
 		if (phy->in_reset)
 			ret = -ETIMEDOUT;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 2f9e01717ef3..ba9d6877483a 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -432,7 +432,7 @@
 #define CMPLT_HDR_IPTT_OFF		0
 #define CMPLT_HDR_IPTT_MSK		(0xffff << CMPLT_HDR_IPTT_OFF)
 #define CMPLT_HDR_DEV_ID_OFF		16
-#define CMPLT_HDR_DEV_ID_MSK		(0xffff << CMPLT_HDR_DEV_ID_OFF)
+#define CMPLT_HDR_DEV_ID_MSK		(0xffffU << CMPLT_HDR_DEV_ID_OFF)
 /* dw3 */
 #define SATA_DISK_IN_ERROR_STATUS_OFF	8
 #define SATA_DISK_IN_ERROR_STATUS_MSK	(0x1 << SATA_DISK_IN_ERROR_STATUS_OFF)
@@ -444,7 +444,7 @@
 #define FIS_ATA_STATUS_ERR_OFF		18
 #define FIS_ATA_STATUS_ERR_MSK		(0x1 << FIS_ATA_STATUS_ERR_OFF)
 #define FIS_TYPE_SDB_OFF		31
-#define FIS_TYPE_SDB_MSK		(0x1 << FIS_TYPE_SDB_OFF)
+#define FIS_TYPE_SDB_MSK		(0x1U << FIS_TYPE_SDB_OFF)
 
 /* ITCT header */
 /* qw0 */
@@ -896,7 +896,7 @@ static void setup_itct_v3_hw(struct hisi_hba *hisi_hba,
 			qw0 = HISI_SAS_DEV_TYPE_SATA << ITCT_HDR_DEV_TYPE_OFF;
 		break;
 	default:
-		dev_warn(dev, "setup itct: unsupported dev type (%d)\n",
+		dev_warn(dev, "setup itct: unsupported dev type (%u)\n",
 			 sas_dev->dev_type);
 	}
 
@@ -2847,7 +2847,7 @@ static void wait_cmds_complete_timeout_v3_hw(struct hisi_hba *hisi_hba,
 static ssize_t intr_conv_v3_hw_show(struct device *dev,
 				    struct device_attribute *attr, char *buf)
 {
-	return scnprintf(buf, PAGE_SIZE, "%u\n", hisi_sas_intr_conv);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", hisi_sas_intr_conv);
 }
 static DEVICE_ATTR_RO(intr_conv_v3_hw);
 
@@ -3293,7 +3293,7 @@ static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable)
 	u32 *fix_code = &hisi_hba->debugfs_bist_fixed_code[0];
 	struct device *dev = hisi_hba->dev;
 
-	dev_info(dev, "BIST info:phy%d link_rate=%d code_mode=%d path_mode=%d ffe={0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x} fixed_code={0x%x, 0x%x}\n",
+	dev_info(dev, "BIST info:phy%u link_rate=%u code_mode=%u path_mode=%u ffe={0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x} fixed_code={0x%x, 0x%x}\n",
 		 phy_no, linkrate, code_mode, path_mode,
 		 ffe[FFE_SAS_1_5_GBPS], ffe[FFE_SAS_3_0_GBPS],
 		 ffe[FFE_SAS_6_0_GBPS], ffe[FFE_SAS_12_0_GBPS],
@@ -3650,7 +3650,7 @@ static void debugfs_print_reg_v3_hw(u32 *regs_val, struct seq_file *s,
 	int i;
 
 	for (i = 0; i < reg->count; i++) {
-		int off = i * HISI_SAS_REG_MEM_SIZE;
+		u32 off = i * HISI_SAS_REG_MEM_SIZE;
 		const char *name;
 
 		name = debugfs_to_reg_name_v3_hw(off, reg->base_off,
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index b395a9d7c640..61f682800765 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -3968,6 +3968,7 @@ static const struct target_core_fabric_ops ibmvscsis_ops = {
 
 	.tfc_wwn_attrs			= ibmvscsis_wwn_attrs,
 
+	.default_compl_type		= TARGET_QUEUE_COMPL,
 	.default_submit_type		= TARGET_DIRECT_SUBMIT,
 	.direct_submit_supp		= 1,
 };
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 7b4fe0e6afb2..9260b1c9b0e0 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -267,7 +267,7 @@ iscsi_sw_tcp_conn_restore_callbacks(struct iscsi_conn *conn)
 	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
 	struct sock *sk = tcp_sw_conn->sock->sk;
 
-	/* restore socket callbacks, see also: iscsi_conn_set_callbacks() */
+	/* restore socket callbacks, see also: iscsi_sw_tcp_conn_set_callbacks() */
 	write_lock_bh(&sk->sk_callback_lock);
 	sk->sk_user_data    = NULL;
 	sk->sk_data_ready   = tcp_sw_conn->old_data_ready;
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 689793d03c20..49ed55db1e47 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -552,8 +552,6 @@ struct lpfc_cgn_info {
 	);
 
 	__le32   cgn_info_crc;
-#define LPFC_CGN_CRC32_MAGIC_NUMBER	0x1EDC6F41
-#define LPFC_CGN_CRC32_SEED		0xFFFFFFFF
 };
 
 #define LPFC_CGN_INFO_SZ	(sizeof(struct lpfc_cgn_info) -  \
@@ -1667,8 +1665,9 @@ lpfc_phba_elsring(struct lpfc_hba *phba)
  * @mask: Pointer to phba's cpumask member.
  * @start: starting cpu index
  *
- * Note: If no valid cpu found, then nr_cpu_ids is returned.
+ * Returns: next online CPU in @mask on success
  *
+ * Note: If no valid cpu found, then nr_cpu_ids is returned.
  **/
 static __always_inline unsigned int
 lpfc_next_online_cpu(const struct cpumask *mask, unsigned int start)
@@ -1680,8 +1679,9 @@ lpfc_next_online_cpu(const struct cpumask *mask, unsigned int start)
  * lpfc_next_present_cpu - Finds next present CPU after n
  * @n: the cpu prior to search
  *
- * Note: If no next present cpu, then fallback to first present cpu.
+ * Returns: next present CPU after CPU @n
  *
+ * Note: If no next present cpu, then fallback to first present cpu.
  **/
 static __always_inline unsigned int lpfc_next_present_cpu(int n)
 {
@@ -1691,7 +1691,7 @@ static __always_inline unsigned int lpfc_next_present_cpu(int n)
 /**
  * lpfc_sli4_mod_hba_eq_delay - update EQ delay
  * @phba: Pointer to HBA context object.
- * @q: The Event Queue to update.
+ * @eq: The Event Queue to update.
  * @delay: The delay value (in us) to be written.
  *
  **/
@@ -1753,8 +1753,9 @@ static const char *routine(enum enum_name table_key)			\
  * Pr Tag     1               0              N
  * Pr Tag     1               1              Y
  * Pr Tag     2               *              Y
- ---------------------------------------------------
+ * ---------------------------------------------------
  *
+ * Returns: whether VMID is enabled
  **/
 static inline int lpfc_is_vmid_enabled(struct lpfc_hba *phba)
 {
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index efeb61b15a5b..8a5b76bdea06 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -86,7 +86,7 @@ void lpfc_cmf_stop(struct lpfc_hba *phba);
 void lpfc_init_congestion_stat(struct lpfc_hba *phba);
 void lpfc_init_congestion_buf(struct lpfc_hba *phba);
 int lpfc_sli4_cgn_params_read(struct lpfc_hba *phba);
-uint32_t lpfc_cgn_calc_crc32(void *bufp, uint32_t sz, uint32_t seed);
+uint32_t lpfc_cgn_calc_crc32(const void *data, size_t size);
 int lpfc_config_cgn_signal(struct lpfc_hba *phba);
 int lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total);
 void lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba);
@@ -660,6 +660,7 @@ void lpfc_wqe_cmd_template(void);
 void lpfc_nvmet_cmd_template(void);
 void lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
 			   uint32_t stat, uint32_t param);
+void lpfc_nvme_flush_abts_list(struct lpfc_hba *phba);
 void lpfc_nvmels_flush_cmd(struct lpfc_hba *phba);
 extern int lpfc_enable_nvmet_cnt;
 extern unsigned long long lpfc_enable_nvmet[];
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index d64f4acfcdae..c7853e7fe071 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -2427,13 +2427,14 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 
 			/* CGN is only for the physical port, no vports */
 			if (lpfc_fdmi_cmd(vport, ndlp, cmd,
-					  LPFC_FDMI_VENDOR_ATTR_mi) == 0)
+					  LPFC_FDMI_VENDOR_ATTR_mi) == 0) {
 				phba->link_flag |= LS_CT_VEN_RPA;
-			lpfc_printf_log(phba, KERN_INFO,
+				lpfc_printf_log(phba, KERN_INFO,
 					LOG_DISCOVERY | LOG_ELS,
 					"6458 Send MI FDMI:%x Flag x%x\n",
 					phba->sli4_hba.pc_sli4_params.mi_ver,
 					phba->link_flag);
+			}
 		} else {
 			lpfc_printf_log(phba, KERN_INFO,
 					LOG_DISCOVERY | LOG_ELS,
@@ -3214,7 +3215,7 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		     struct lpfc_iocbq *rspiocb);
 
 	if (!ndlp)
-		return 0;
+		goto fdmi_cmd_exit;
 
 	cmpl = lpfc_cmpl_ct_disc_fdmi; /* called from discovery */
 
@@ -3320,7 +3321,7 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		if (vport->port_type != LPFC_PHYSICAL_PORT) {
 			ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
 			if (!ndlp)
-				return 0;
+				goto fdmi_cmd_free_rspvirt;
 		}
 		fallthrough;
 	case SLI_MGMT_RPA:
@@ -3396,7 +3397,7 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		if (vport->port_type != LPFC_PHYSICAL_PORT) {
 			ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
 			if (!ndlp)
-				return 0;
+				goto fdmi_cmd_free_rspvirt;
 		}
 		fallthrough;
 	case SLI_MGMT_DPA:
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index de0adeecf668..a377e97cbe65 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2013 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -137,7 +137,8 @@ struct lpfc_nodelist {
 	uint16_t	nlp_maxframe;		/* Max RCV frame size */
 	uint8_t		nlp_class_sup;		/* Supported Classes */
 	uint8_t         nlp_retry;		/* used for ELS retries */
-	uint8_t         nlp_fcp_info;	        /* class info, bits 0-3 */
+	uint8_t         nlp_fcp_info;	        /* class info, bits 0-2 */
+#define NLP_FCP_CLASS_MASK 0x07			/* class info bitmask */
 #define NLP_FCP_2_DEVICE   0x10			/* FCP-2 device */
 	u8		nlp_nvme_info;	        /* NVME NSLER Support */
 	uint8_t		vmid_support;		/* destination VMID support */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index cee709617a31..d70a4039a345 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -1107,7 +1107,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		vport->vmid_flag = 0;
 	}
 	if (sp->cmn.priority_tagging)
-		vport->phba->pport->vmid_flag |= (LPFC_VMID_ISSUE_QFPA |
+		vport->vmid_flag |= (LPFC_VMID_ISSUE_QFPA |
 						  LPFC_VMID_TYPE_PRIO);
 
 	/*
@@ -1303,8 +1303,12 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
 				     ndlp->nlp_DID, ELS_CMD_FLOGI);
 
-	if (!elsiocb)
+	if (!elsiocb) {
+		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS | LOG_DISCOVERY,
+				 "4296 Unable to prepare FLOGI iocb\n");
 		return 1;
+	}
 
 	wqe = &elsiocb->wqe;
 	pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
@@ -1394,10 +1398,8 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		phba->sli3_options, 0, 0);
 
 	elsiocb->ndlp = lpfc_nlp_get(ndlp);
-	if (!elsiocb->ndlp) {
-		lpfc_els_free_iocb(phba, elsiocb);
-		return 1;
-	}
+	if (!elsiocb->ndlp)
+		goto err_out;
 
 	/* Avoid race with FLOGI completion and hba_flags. */
 	set_bit(HBA_FLOGI_ISSUED, &phba->hba_flag);
@@ -1407,9 +1409,8 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	if (rc == IOCB_ERROR) {
 		clear_bit(HBA_FLOGI_ISSUED, &phba->hba_flag);
 		clear_bit(HBA_FLOGI_OUTSTANDING, &phba->hba_flag);
-		lpfc_els_free_iocb(phba, elsiocb);
 		lpfc_nlp_put(ndlp);
-		return 1;
+		goto err_out;
 	}
 
 	/* Clear external loopback plug detected flag */
@@ -1474,6 +1475,13 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	}
 
 	return 0;
+
+ err_out:
+	lpfc_els_free_iocb(phba, elsiocb);
+	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+	lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS | LOG_DISCOVERY,
+			 "4297 Issue FLOGI: Cannot send IOCB\n");
+	return 1;
 }
 
 /**
@@ -2641,7 +2649,9 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		}
 		npr->estabImagePair = 1;
 		npr->readXferRdyDis = 1;
-		if (vport->cfg_first_burst_size)
+		if (phba->sli_rev == LPFC_SLI_REV4 &&
+		    !test_bit(HBA_FCOE_MODE, &phba->hba_flag) &&
+		    vport->cfg_first_burst_size)
 			npr->writeXferRdyDis = 1;
 
 		/* For FCP support */
@@ -10291,10 +10301,8 @@ lpfc_els_rcv_fpin_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
 						cpu_to_le16(value);
 					cp->cgn_warn_freq =
 						cpu_to_le16(value);
-					crc = lpfc_cgn_calc_crc32
-						(cp,
-						LPFC_CGN_INFO_SZ,
-						LPFC_CGN_CRC32_SEED);
+					crc = lpfc_cgn_calc_crc32(
+						cp, LPFC_CGN_INFO_SZ);
 					cp->cgn_info_crc = cpu_to_le32(crc);
 				}
 
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 8aaf05d7bb0a..3fffe9b88e63 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -425,7 +425,6 @@ lpfc_check_nlp_post_devloss(struct lpfc_vport *vport,
 {
 	if (test_and_clear_bit(NLP_IN_RECOV_POST_DEV_LOSS, &ndlp->save_flags)) {
 		clear_bit(NLP_DROPPED, &ndlp->nlp_flag);
-		lpfc_nlp_get(ndlp);
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_NODE,
 				 "8438 Devloss timeout reversed on DID x%x "
 				 "refcnt %d ndlp %p flag x%lx "
@@ -3174,7 +3173,11 @@ lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 		return;
 	}
 
-	lpfc_initial_flogi(vport);
+	if (!lpfc_initial_flogi(vport)) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_ELS,
+				 "2345 Can't issue initial FLOGI\n");
+		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+	}
 	mempool_free(mboxq, phba->mbox_mem_pool);
 	return;
 }
@@ -3247,8 +3250,14 @@ lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 			return;
 	}
 
-	if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
-		lpfc_initial_fdisc(vport);
+	if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) {
+		if (!lpfc_initial_fdisc(vport)) {
+			lpfc_printf_vlog(vport, KERN_WARNING,
+					 LOG_MBOX | LOG_ELS,
+					 "2346 Can't issue initial FDISC\n");
+			lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+		}
+	}
 	else {
 		lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@@ -5228,12 +5237,11 @@ lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport,
 
 /*
  * Free rpi associated with LPFC_NODELIST entry.
- * This routine is called from lpfc_freenode(), when we are removing
- * a LPFC_NODELIST entry. It is also called if the driver initiates a
- * LOGO that completes successfully, and we are waiting to PLOGI back
- * to the remote NPort. In addition, it is called after we receive
- * and unsolicated ELS cmd, send back a rsp, the rsp completes and
- * we are waiting to PLOGI back to the remote NPort.
+ * This routine is called if the driver initiates a LOGO that completes
+ * successfully, and we are waiting to PLOGI back to the remote NPort.
+ * In addition, it is called after we receive an unsolicited ELS cmd,
+ * send back a rsp, the rsp completes and we are waiting to PLOGI back
+ * to the remote NPort.
  */
 int
 lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
@@ -6599,11 +6607,6 @@ lpfc_nlp_get(struct lpfc_nodelist *ndlp)
 	unsigned long flags;
 
 	if (ndlp) {
-		lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
-			"node get:        did:x%x flg:x%lx refcnt:x%x",
-			ndlp->nlp_DID, ndlp->nlp_flag,
-			kref_read(&ndlp->kref));
-
 		/* The check of ndlp usage to prevent incrementing the
 		 * ndlp reference count that is in the process of being
 		 * released.
@@ -6611,9 +6614,8 @@ lpfc_nlp_get(struct lpfc_nodelist *ndlp)
 		spin_lock_irqsave(&ndlp->lock, flags);
 		if (!kref_get_unless_zero(&ndlp->kref)) {
 			spin_unlock_irqrestore(&ndlp->lock, flags);
-			lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
-				"0276 %s: ndlp:x%px refcnt:%d\n",
-				__func__, (void *)ndlp, kref_read(&ndlp->kref));
+			pr_info("0276 %s: NDLP x%px has zero reference count. "
+				"Exiting\n", __func__, ndlp);
 			return NULL;
 		}
 		spin_unlock_irqrestore(&ndlp->lock, flags);
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 94ad253d65a0..3bf522c7f099 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.  *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -22,6 +22,7 @@
  *******************************************************************/
 
 #include <linux/blkdev.h>
+#include <linux/crc32.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/idr.h>
@@ -1087,7 +1088,6 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
 	struct lpfc_async_xchg_ctx *ctxp, *ctxp_next;
 	struct lpfc_sli4_hdw_queue *qp;
 	LIST_HEAD(aborts);
-	LIST_HEAD(nvme_aborts);
 	LIST_HEAD(nvmet_aborts);
 	struct lpfc_sglq *sglq_entry = NULL;
 	int cnt, idx;
@@ -1946,6 +1946,7 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
 
 	lpfc_offline_prep(phba, mbx_action);
 	lpfc_sli_flush_io_rings(phba);
+	lpfc_nvme_flush_abts_list(phba);
 	lpfc_nvmels_flush_cmd(phba);
 	lpfc_offline(phba);
 	/* release interrupt for possible resource change */
@@ -5634,8 +5635,7 @@ lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag)
 		cp->cgn_stat_npm = value;
 	}
 
-	value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
-				    LPFC_CGN_CRC32_SEED);
+	value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ);
 	cp->cgn_info_crc = cpu_to_le32(value);
 }
 
@@ -5897,8 +5897,7 @@ lpfc_cmf_stats_timer(struct hrtimer *timer)
 	cp->cgn_warn_freq = cpu_to_le16(value);
 	cp->cgn_alarm_freq = cpu_to_le16(value);
 
-	lvalue = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
-				     LPFC_CGN_CRC32_SEED);
+	lvalue = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ);
 	cp->cgn_info_crc = cpu_to_le32(lvalue);
 
 	hrtimer_forward_now(timer, ktime_set(0, LPFC_SEC_MIN * NSEC_PER_SEC));
@@ -7121,8 +7120,7 @@ lpfc_cgn_params_parse(struct lpfc_hba *phba,
 			cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
 			cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
 			cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
-			crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
-						  LPFC_CGN_CRC32_SEED);
+			crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ);
 			cp->cgn_info_crc = cpu_to_le32(crc);
 		}
 		spin_unlock_irq(&phba->hbalock);
@@ -8283,7 +8281,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 			phba->cfg_total_seg_cnt,  phba->cfg_scsi_seg_cnt,
 			phba->cfg_nvme_seg_cnt);
 
-	i = min(phba->cfg_sg_dma_buf_size, SLI4_PAGE_SIZE);
+	i = min_t(u32, phba->cfg_sg_dma_buf_size, SLI4_PAGE_SIZE);
 
 	phba->lpfc_sg_dma_buf_pool =
 			dma_pool_create("lpfc_sg_dma_buf_pool",
@@ -13493,54 +13491,14 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
 		phba->pport->work_port_events = 0;
 }
 
-static uint32_t
-lpfc_cgn_crc32(uint32_t crc, u8 byte)
-{
-	uint32_t msb = 0;
-	uint32_t bit;
-
-	for (bit = 0; bit < 8; bit++) {
-		msb = (crc >> 31) & 1;
-		crc <<= 1;
-
-		if (msb ^ (byte & 1)) {
-			crc ^= LPFC_CGN_CRC32_MAGIC_NUMBER;
-			crc |= 1;
-		}
-		byte >>= 1;
-	}
-	return crc;
-}
-
-static uint32_t
-lpfc_cgn_reverse_bits(uint32_t wd)
-{
-	uint32_t result = 0;
-	uint32_t i;
-
-	for (i = 0; i < 32; i++) {
-		result <<= 1;
-		result |= (1 & (wd >> i));
-	}
-	return result;
-}
-
 /*
  * The routine corresponds with the algorithm the HBA firmware
  * uses to validate the data integrity.
  */
 uint32_t
-lpfc_cgn_calc_crc32(void *ptr, uint32_t byteLen, uint32_t crc)
+lpfc_cgn_calc_crc32(const void *data, size_t size)
 {
-	uint32_t  i;
-	uint32_t result;
-	uint8_t  *data = (uint8_t *)ptr;
-
-	for (i = 0; i < byteLen; ++i)
-		crc = lpfc_cgn_crc32(crc, data[i]);
-
-	result = ~lpfc_cgn_reverse_bits(crc);
-	return result;
+	return ~crc32c(~0, data, size);
 }
 
 void
@@ -13589,7 +13547,7 @@ lpfc_init_congestion_buf(struct lpfc_hba *phba)
 
 	cp->cgn_warn_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
 	cp->cgn_alarm_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
-	crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
+	crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ);
 	cp->cgn_info_crc = cpu_to_le32(crc);
 
 	phba->cgn_evt_timestamp = jiffies +
@@ -13612,7 +13570,7 @@ lpfc_init_congestion_stat(struct lpfc_hba *phba)
 	memset(&cp->cgn_stat, 0, sizeof(cp->cgn_stat));
 
 	lpfc_cgn_update_tstamp(phba, &cp->stat_start);
-	crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
+	crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ);
 	cp->cgn_info_crc = cpu_to_le32(crc);
 }
 
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index a6b3b16f870d..74c2820c64f3 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.  *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -2846,6 +2846,54 @@ lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
 #endif
 }
 
+/**
+ * lpfc_nvme_flush_abts_list - Clean up nvme commands from the abts list
+ * @phba: Pointer to HBA context object.
+ *
+ **/
+void
+lpfc_nvme_flush_abts_list(struct lpfc_hba *phba)
+{
+#if (IS_ENABLED(CONFIG_NVME_FC))
+	struct lpfc_io_buf *psb, *psb_next;
+	struct lpfc_sli4_hdw_queue *qp;
+	LIST_HEAD(aborts);
+	int i;
+
+	/* abts_xxxx_buf_list_lock required because worker thread uses this
+	 * list.
+	 */
+	spin_lock_irq(&phba->hbalock);
+	for (i = 0; i < phba->cfg_hdw_queue; i++) {
+		qp = &phba->sli4_hba.hdwq[i];
+
+		spin_lock(&qp->abts_io_buf_list_lock);
+		list_for_each_entry_safe(psb, psb_next,
+					 &qp->lpfc_abts_io_buf_list, list) {
+			if (!(psb->cur_iocbq.cmd_flag & LPFC_IO_NVME))
+				continue;
+			list_move(&psb->list, &aborts);
+			qp->abts_nvme_io_bufs--;
+		}
+		spin_unlock(&qp->abts_io_buf_list_lock);
+	}
+	spin_unlock_irq(&phba->hbalock);
+
+	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
+		list_del_init(&psb->list);
+		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+				"6195 %s: lpfc_ncmd x%px flags x%x "
+				"cmd_flag x%x xri x%x\n", __func__,
+				psb, psb->flags,
+				psb->cur_iocbq.cmd_flag,
+				psb->cur_iocbq.sli4_xritag);
+		psb->flags &= ~LPFC_SBUF_XBUSY;
+		psb->status = IOSTAT_SUCCESS;
+		lpfc_sli4_nvme_pci_offline_aborted(phba, psb);
+	}
+#endif
+}
+
 /**
  * lpfc_nvmels_flush_cmd - Clean up outstanding nvmels commands for a port
  * @phba: Pointer to HBA context object.
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 69bf1ac6f846..e9d27703bc44 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -4665,7 +4665,7 @@ static int lpfc_scsi_prep_cmnd_buf_s3(struct lpfc_vport *vport,
 	else
 		piocbq->iocb.ulpFCP2Rcvy = 0;
 
-	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
+	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & NLP_FCP_CLASS_MASK);
 	piocbq->io_buf  = lpfc_cmd;
 	if (!piocbq->cmd_cmpl)
 		piocbq->cmd_cmpl = lpfc_scsi_cmd_iocb_cmpl;
@@ -4777,7 +4777,7 @@ static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport,
 		bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
 
 	bf_set(wqe_class, &wqe->generic.wqe_com,
-	       (pnode->nlp_fcp_info & 0x0f));
+	       (pnode->nlp_fcp_info & NLP_FCP_CLASS_MASK));
 
 	 /* Word 8 */
 	wqe->generic.wqe_com.abort_tag = pwqeq->iotag;
@@ -4877,7 +4877,7 @@ lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport,
 	piocb->ulpCommand = CMD_FCP_ICMND64_CR;
 	piocb->ulpContext = ndlp->nlp_rpi;
 	piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
-	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
+	piocb->ulpClass = (ndlp->nlp_fcp_info & NLP_FCP_CLASS_MASK);
 	piocb->ulpPU = 0;
 	piocb->un.fcpi.fcpi_parm = 0;
 
@@ -4945,7 +4945,7 @@ lpfc_scsi_prep_task_mgmt_cmd_s4(struct lpfc_vport *vport,
 	bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
 	       ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0));
 	bf_set(wqe_class, &wqe->fcp_icmd.wqe_com,
-	       (ndlp->nlp_fcp_info & 0x0f));
+	       (ndlp->nlp_fcp_info & NLP_FCP_CLASS_MASK));
 
 	/* ulpTimeout is only one byte */
 	if (lpfc_cmd->timeout > 0xff) {
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 1cbfbe44cb7c..bd71292e7480 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -4572,59 +4572,41 @@ void
 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
 {
 	LIST_HEAD(tx_completions);
-	LIST_HEAD(txcmplq_completions);
+	spinlock_t *plock;		/* for transmit queue access */
 	struct lpfc_iocbq *iocb, *next_iocb;
 	int offline;
 
-	if (pring->ringno == LPFC_ELS_RING) {
+	if (phba->sli_rev >= LPFC_SLI_REV4)
+		plock = &pring->ring_lock;
+	else
+		plock = &phba->hbalock;
+
+	if (pring->ringno == LPFC_ELS_RING)
 		lpfc_fabric_abort_hba(phba);
-	}
+
 	offline = pci_channel_offline(phba->pcidev);
 
-	/* Error everything on txq and txcmplq
-	 * First do the txq.
-	 */
-	if (phba->sli_rev >= LPFC_SLI_REV4) {
-		spin_lock_irq(&pring->ring_lock);
-		list_splice_init(&pring->txq, &tx_completions);
-		pring->txq_cnt = 0;
+	/* Cancel everything on txq */
+	spin_lock_irq(plock);
+	list_splice_init(&pring->txq, &tx_completions);
+	pring->txq_cnt = 0;
 
-		if (offline) {
-			list_splice_init(&pring->txcmplq,
-					 &txcmplq_completions);
-		} else {
-			/* Next issue ABTS for everything on the txcmplq */
-			list_for_each_entry_safe(iocb, next_iocb,
-						 &pring->txcmplq, list)
-				lpfc_sli_issue_abort_iotag(phba, pring,
-							   iocb, NULL);
-		}
-		spin_unlock_irq(&pring->ring_lock);
+	if (offline) {
+		/* Cancel everything on txcmplq */
+		list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
+			iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
+		list_splice_init(&pring->txcmplq, &tx_completions);
+		pring->txcmplq_cnt = 0;
 	} else {
-		spin_lock_irq(&phba->hbalock);
-		list_splice_init(&pring->txq, &tx_completions);
-		pring->txq_cnt = 0;
-
-		if (offline) {
-			list_splice_init(&pring->txcmplq, &txcmplq_completions);
-		} else {
-			/* Next issue ABTS for everything on the txcmplq */
-			list_for_each_entry_safe(iocb, next_iocb,
-						 &pring->txcmplq, list)
-				lpfc_sli_issue_abort_iotag(phba, pring,
-							   iocb, NULL);
-		}
-		spin_unlock_irq(&phba->hbalock);
+		/* Issue ABTS for everything on the txcmplq */
+		list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
+			lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
 	}
+	spin_unlock_irq(plock);
 
-	if (offline) {
-		/* Cancel all the IOCBs from the completions list */
-		lpfc_sli_cancel_iocbs(phba, &txcmplq_completions,
-				      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
-	} else {
-		/* Make sure HBA is alive */
+	if (!offline)
 		lpfc_issue_hb_tmo(phba);
-	}
+
 	/* Cancel all the IOCBs from the completions list */
 	lpfc_sli_cancel_iocbs(phba, &tx_completions, IOSTAT_LOCAL_REJECT,
 			      IOERR_SLI_ABORTED);
@@ -14736,11 +14718,22 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
 					atomic_read(&tgtp->rcv_fcp_cmd_out),
 					atomic_read(&tgtp->xmt_fcp_release));
 		}
+		hrq->RQ_discard_frm++;
 		fallthrough;
-
 	case FC_STATUS_INSUFF_BUF_NEED_BUF:
+		/* Unexpected event - bump the counter for support. */
 		hrq->RQ_no_posted_buf++;
-		/* Post more buffers if possible */
+
+		lpfc_log_msg(phba, KERN_WARNING,
+			     LOG_ELS | LOG_DISCOVERY | LOG_SLI,
+			     "6423 RQE completion Status x%x, needed x%x "
+			     "discarded x%x\n", status,
+			     hrq->RQ_no_posted_buf - hrq->RQ_discard_frm,
+			     hrq->RQ_discard_frm);
+
+		/* For SLI3, post more buffers if possible. No action for SLI4.
+		 * SLI4 is reposting immediately after processing the RQE.
+		 */
 		set_bit(HBA_POST_RECEIVE_BUFFER, &phba->hba_flag);
 		workposted = true;
 		break;
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index ee58383492b2..0aa105cab125 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2009-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -246,6 +246,8 @@ struct lpfc_queue {
 	uint32_t q_cnt_2;
 	uint32_t q_cnt_3;
 	uint64_t q_cnt_4;
+	uint32_t q_cnt_5;
+
 /* defines for EQ stats */
 #define	EQ_max_eqe		q_cnt_1
 #define	EQ_no_entry		q_cnt_2
@@ -268,6 +270,7 @@ struct lpfc_queue {
 #define	RQ_no_buf_found		q_cnt_2
 #define	RQ_buf_posted		q_cnt_3
 #define	RQ_rcv_buf		q_cnt_4
+#define RQ_discard_frm		q_cnt_5
 
 	struct work_struct	irqwork;
 	struct work_struct	spwork;
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index c4ca8bf5843a..31a0cd9db1c2 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -20,7 +20,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "14.4.0.13"
+#define LPFC_DRIVER_VERSION "14.4.0.14"
 #define LPFC_DRIVER_NAME		"lpfc"
 
 /* Used for SLI 2/3 */
@@ -32,6 +32,6 @@
 
 #define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
 		LPFC_DRIVER_VERSION
-#define LPFC_COPYRIGHT "Copyright (C) 2017-2025 Broadcom. All Rights " \
+#define LPFC_COPYRIGHT "Copyright (C) 2017-2026 Broadcom. All Rights " \
 		"Reserved. The term \"Broadcom\" refers to Broadcom Inc. " \
 		"and/or its subsidiaries."
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index ac71ea4898b2..ecd365d78ae3 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -6365,11 +6365,13 @@ static int megasas_init_fw(struct megasas_instance *instance)
 
 	megasas_setup_jbod_map(instance);
 
-	if (megasas_get_device_list(instance) != SUCCESS) {
-		dev_err(&instance->pdev->dev,
-			"%s: megasas_get_device_list failed\n",
-			__func__);
-		goto fail_get_ld_pd_list;
+	scoped_guard(mutex, &instance->reset_mutex) {
+		if (megasas_get_device_list(instance) != SUCCESS) {
+			dev_err(&instance->pdev->dev,
+				"%s: megasas_get_device_list failed\n",
+				__func__);
+			goto fail_get_ld_pd_list;
+		}
 	}
 
 	/* stream detection initialization */
@@ -6468,7 +6470,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
 	}
 
 	if (instance->snapdump_wait_time) {
-		megasas_get_snapdump_properties(instance);
+		scoped_guard(mutex, &instance->reset_mutex)
+			megasas_get_snapdump_properties(instance);
 		dev_info(&instance->pdev->dev, "Snap dump wait time\t: %d\n",
 			 instance->snapdump_wait_time);
 	}
diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
index 6e962092577d..c25525fe0671 100644
--- a/drivers/scsi/mpi3mr/mpi3mr.h
+++ b/drivers/scsi/mpi3mr/mpi3mr.h
@@ -159,6 +159,7 @@ extern atomic64_t event_counter;
 /* Controller Reset related definitions */
 #define MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT	5
 #define MPI3MR_MAX_RESET_RETRY_COUNT		3
+#define MPI3MR_MAX_SHUTDOWN_RETRY_COUNT		2
 
 /* ResponseCode definitions */
 #define MPI3MR_RI_MASK_RESPCODE		(0x000000FF)
@@ -323,6 +324,7 @@ enum mpi3mr_reset_reason {
 	MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT = 29,
 	MPI3MR_RESET_FROM_SAS_TRANSPORT_TIMEOUT = 30,
 	MPI3MR_RESET_FROM_TRIGGER = 31,
+	MPI3MR_RESET_FROM_INVALID_COMPLETION = 32,
 };
 
 #define MPI3MR_RESET_REASON_OSTYPE_LINUX	1
@@ -428,6 +430,14 @@ struct segments {
  * @q_segments: Segment descriptor pointer
  * @q_segment_list: Segment list base virtual address
  * @q_segment_list_dma: Segment list base DMA address
+ * @last_full_host_tag: Hosttag of last IO returned to SML
+ *			due to queue full
+ * @qfull_io_count: Number of IOs returned back to SML
+ *			due to queue full
 + * @qfull_instances: Total queue full occurrences. One occurrence
+ *			starts with queue full detection and ends
+ *			with queue full breaks.
+ *
  */
 struct op_req_qinfo {
 	u16 ci;
@@ -441,6 +451,10 @@ struct op_req_qinfo {
 	struct segments *q_segments;
 	void *q_segment_list;
 	dma_addr_t q_segment_list_dma;
+	u16 last_full_host_tag;
+	u64 qfull_io_count;
+	u32 qfull_instances;
+
 };
 
 /**
@@ -1183,6 +1197,7 @@ struct scmd_priv {
  * @num_tb_segs: Number of Segments in Trace buffer
  * @trace_buf_pool: DMA pool for Segmented trace buffer segments
  * @trace_buf: Trace buffer segments memory descriptor
+ * @invalid_io_comp: Invalid IO completion
  */
 struct mpi3mr_ioc {
 	struct list_head list;
@@ -1394,6 +1409,7 @@ struct mpi3mr_ioc {
 	u32 num_tb_segs;
 	struct dma_pool *trace_buf_pool;
 	struct segments *trace_buf;
+	u8 invalid_io_comp;
 
 };
 
diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
index 81150bef1145..01042eaf0dff 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -996,6 +996,7 @@ static const struct {
 	{ MPI3MR_RESET_FROM_FIRMWARE, "firmware asynchronous reset" },
 	{ MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT, "configuration request timeout"},
 	{ MPI3MR_RESET_FROM_SAS_TRANSPORT_TIMEOUT, "timeout of a SAS transport layer request" },
+	{ MPI3MR_RESET_FROM_INVALID_COMPLETION, "invalid cmd completion" },
 };
 
 /**
@@ -2361,6 +2362,9 @@ static int mpi3mr_create_op_req_q(struct mpi3mr_ioc *mrioc, u16 idx,
 	op_req_q->ci = 0;
 	op_req_q->pi = 0;
 	op_req_q->reply_qid = reply_qid;
+	op_req_q->last_full_host_tag =  MPI3MR_HOSTTAG_INVALID;
+	op_req_q->qfull_io_count =  0;
+	op_req_q->qfull_instances =  0;
 	spin_lock_init(&op_req_q->q_lock);
 
 	if (!op_req_q->q_segments) {
@@ -2547,6 +2551,8 @@ int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
 	u16 req_sz = mrioc->facts.op_req_sz;
 	struct segments *segments = op_req_q->q_segments;
 	struct op_reply_qinfo *op_reply_q = NULL;
+	struct mpi3_scsi_io_request *scsiio_req =
+		(struct mpi3_scsi_io_request *)req;
 
 	reply_qidx = op_req_q->reply_qid - 1;
 	op_reply_q = mrioc->op_reply_qinfo + reply_qidx;
@@ -2564,11 +2570,21 @@ int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
 		mpi3mr_process_op_reply_q(mrioc, mrioc->intr_info[midx].op_reply_q);
 
 		if (mpi3mr_check_req_qfull(op_req_q)) {
+
+			if (op_req_q->last_full_host_tag ==
+			    MPI3MR_HOSTTAG_INVALID)
+				op_req_q->qfull_instances++;
+
+			op_req_q->last_full_host_tag = scsiio_req->host_tag;
+			op_req_q->qfull_io_count++;
 			retval = -EAGAIN;
 			goto out;
 		}
 	}
 
+	if (op_req_q->last_full_host_tag != MPI3MR_HOSTTAG_INVALID)
+		op_req_q->last_full_host_tag = MPI3MR_HOSTTAG_INVALID;
+
 	if (mrioc->reset_in_progress) {
 		ioc_err(mrioc, "OpReqQ submit reset in progress\n");
 		retval = -EAGAIN;
@@ -2879,6 +2895,11 @@ static void mpi3mr_watchdog_work(struct work_struct *work)
 		return;
 	}
 
+	if (mrioc->invalid_io_comp) {
+		mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_INVALID_COMPLETION, 1);
+		return;
+	}
+
 	if (atomic_read(&mrioc->admin_pend_isr)) {
 		ioc_err(mrioc, "Unprocessed admin ISR instance found\n"
 				"flush admin replies\n");
@@ -4821,6 +4842,7 @@ void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
 		mrioc->req_qinfo[i].qid = 0;
 		mrioc->req_qinfo[i].reply_qid = 0;
 		spin_lock_init(&mrioc->req_qinfo[i].q_lock);
+		mrioc->req_qinfo[i].last_full_host_tag = 0;
 		mpi3mr_memset_op_req_q_buffers(mrioc, i);
 	}
 
@@ -5036,9 +5058,9 @@ void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
  */
 static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
 {
-	u32 ioc_config, ioc_status;
-	u8 retval = 1;
-	u32 timeout = MPI3MR_DEFAULT_SHUTDOWN_TIME * 10;
+	u32 ioc_config, ioc_status, shutdown_action;
+	u8 retval = 1, retry = 0;
+	u32 timeout = MPI3MR_DEFAULT_SHUTDOWN_TIME * 10, timeout_remaining = 0;
 
 	ioc_info(mrioc, "Issuing shutdown Notification\n");
 	if (mrioc->unrecoverable) {
@@ -5053,14 +5075,16 @@ static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
 		return;
 	}
 
+	shutdown_action = MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL |
+	    MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ;
 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
-	ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
-	ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ;
+	ioc_config |= shutdown_action;
 
 	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
 
 	if (mrioc->facts.shutdown_timeout)
 		timeout = mrioc->facts.shutdown_timeout * 10;
+	timeout_remaining = timeout;
 
 	do {
 		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
@@ -5069,8 +5093,26 @@ static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
 			retval = 0;
 			break;
 		}
+		if (mrioc->unrecoverable)
+			break;
+		if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
+			mpi3mr_print_fault_info(mrioc);
+			if (retry >= MPI3MR_MAX_SHUTDOWN_RETRY_COUNT)
+				break;
+			if (mpi3mr_issue_reset(mrioc,
+			    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
+			    MPI3MR_RESET_FROM_CTLR_CLEANUP))
+				break;
+			ioc_config =
+			    readl(&mrioc->sysif_regs->ioc_configuration);
+			ioc_config |= shutdown_action;
+			writel(ioc_config,
+			    &mrioc->sysif_regs->ioc_configuration);
+			timeout_remaining = timeout;
+			retry++;
+		}
 		msleep(100);
-	} while (--timeout);
+	} while (--timeout_remaining);
 
 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
@@ -5644,6 +5686,7 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
 	ssleep(MPI3MR_RESET_TOPOLOGY_SETTLE_TIME);
 
 out:
+	mrioc->invalid_io_comp = 0;
 	if (!retval) {
 		mrioc->diagsave_timeout = 0;
 		mrioc->reset_in_progress = 0;
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index 90f8b9d1c2ac..402d1f35d214 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -3459,8 +3459,15 @@ void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
 	}
 	scmd = mpi3mr_scmd_from_host_tag(mrioc, host_tag, qidx);
 	if (!scmd) {
-		panic("%s: Cannot Identify scmd for host_tag 0x%x\n",
-		    mrioc->name, host_tag);
+		ioc_err(mrioc, "Cannot Identify scmd for host_tag 0x%x", host_tag);
+		ioc_err(mrioc,
+		    "reply_desc_type(%d) host_tag(%d(0x%04x)): qid(%d): command issued to\n"
+		    "handle(0x%04x) returned with ioc_status(0x%04x), log_info(0x%08x),\n"
+		    "scsi_state(0x%02x), scsi_status(0x%02x), xfer_count(%d), resp_data(0x%08x)\n",
+		    reply_desc_type, host_tag, host_tag, qidx+1, dev_handle, ioc_status,
+		    ioc_loginfo, scsi_state, scsi_status,  xfer_count,
+		    resp_data);
+		mrioc->invalid_io_comp = 1;
 		goto out;
 	}
 	priv = scsi_cmd_priv(scmd);
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 2e584a8bf66b..6a05ce195aa0 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1638,7 +1638,7 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
 {
 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
 	int rval = QLA_FUNCTION_FAILED;
-	uint16_t state[6];
+	uint16_t state[16];
 	uint32_t pstate;
 
 	if (IS_QLAFX00(vha->hw)) {
@@ -2402,6 +2402,63 @@ qla2x00_dport_diagnostics_show(struct device *dev,
 	    vha->dport_data[0], vha->dport_data[1],
 	    vha->dport_data[2], vha->dport_data[3]);
 }
+
+static ssize_t
+qla2x00_mpi_fw_state_show(struct device *dev, struct device_attribute *attr,
+			  char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	int rval = QLA_FUNCTION_FAILED;
+	u16 state[16];
+	u16 mpi_state;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!(IS_QLA27XX(ha) || IS_QLA28XX(ha)))
+		return scnprintf(buf, PAGE_SIZE,
+				"MPI state reporting is not supported for this HBA.\n");
+
+	memset(state, 0, sizeof(state));
+
+	mutex_lock(&vha->hw->optrom_mutex);
+	if (qla2x00_chip_is_down(vha)) {
+		mutex_unlock(&vha->hw->optrom_mutex);
+		ql_dbg(ql_dbg_user, vha, 0x70df,
+		       "ISP reset is in progress, failing mpi_fw_state.\n");
+		return -EBUSY;
+	} else if (vha->hw->flags.eeh_busy) {
+		mutex_unlock(&vha->hw->optrom_mutex);
+		ql_dbg(ql_dbg_user, vha, 0x70ea,
+		       "HBA in PCI error state, failing mpi_fw_state.\n");
+		return -EBUSY;
+	}
+
+	rval = qla2x00_get_firmware_state(vha, state);
+	mutex_unlock(&vha->hw->optrom_mutex);
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_user, vha, 0x70eb,
+		       "MB Command to retrieve MPI state failed (%d), failing mpi_fw_state.\n",
+				rval);
+		return -EIO;
+	}
+
+	mpi_state = state[11];
+
+	if (!(mpi_state & BIT_15))
+		return scnprintf(buf, PAGE_SIZE,
+				 "MPI firmware state reporting is not supported by this firmware. (0x%02x)\n",
+				mpi_state);
+
+	if (!(mpi_state & BIT_8))
+		return scnprintf(buf, PAGE_SIZE,
+				 "MPI firmware is disabled. (0x%02x)\n",
+				mpi_state);
+
+	return scnprintf(buf, PAGE_SIZE,
+			 "MPI firmware is enabled, state is %s. (0x%02x)\n",
+			 mpi_state & BIT_9 ? "active" : "inactive",
+			 mpi_state);
+}
+
 static DEVICE_ATTR(dport_diagnostics, 0444,
 	   qla2x00_dport_diagnostics_show, NULL);
 
@@ -2469,6 +2526,8 @@ static DEVICE_ATTR(port_speed, 0644, qla2x00_port_speed_show,
     qla2x00_port_speed_store);
 static DEVICE_ATTR(port_no, 0444, qla2x00_port_no_show, NULL);
 static DEVICE_ATTR(fw_attr, 0444, qla2x00_fw_attr_show, NULL);
+static DEVICE_ATTR(mpi_fw_state, 0444, qla2x00_mpi_fw_state_show, NULL);
+
 
 static struct attribute *qla2x00_host_attrs[] = {
 	&dev_attr_driver_version.attr.attr,
@@ -2517,6 +2576,7 @@ static struct attribute *qla2x00_host_attrs[] = {
 	&dev_attr_qlini_mode.attr,
 	&dev_attr_ql2xiniexchg.attr,
 	&dev_attr_ql2xexchoffld.attr,
+	&dev_attr_mpi_fw_state.attr,
 	NULL,
 };
 
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 730c42b1a7b9..e746c9274cde 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -4914,7 +4914,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
 	unsigned long	wtime, mtime, cs84xx_time;
 	uint16_t	min_wait;	/* Minimum wait time if loop is down */
 	uint16_t	wait_time;	/* Wait time if loop is coming ready */
-	uint16_t	state[6];
+	uint16_t	state[16];
 	struct qla_hw_data *ha = vha->hw;
 
 	if (IS_QLAFX00(vha->hw))
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 0d598be6f3ea..44e310f1a370 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -2268,6 +2268,13 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
 		mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
 	else
 		mcp->in_mb = MBX_1|MBX_0;
+
+	if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+		mcp->mb[12] = 0;
+		mcp->out_mb |= MBX_12;
+		mcp->in_mb |= MBX_12;
+	}
+
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
 	rval = qla2x00_mailbox_command(vha, mcp);
@@ -2280,6 +2287,8 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
 		states[3] = mcp->mb[4];
 		states[4] = mcp->mb[5];
 		states[5] = mcp->mb[6];  /* DPORT status */
+		if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+			states[11] = mcp->mb[12]; /* MPI state. */
 	}
 
 	if (rval != QLA_SUCCESS) {
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 28df9025def0..3be23ed067e6 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -1841,6 +1841,7 @@ static const struct target_core_fabric_ops tcm_qla2xxx_ops = {
 	.tfc_tpg_base_attrs		= tcm_qla2xxx_tpg_attrs,
 	.tfc_tpg_attrib_attrs		= tcm_qla2xxx_tpg_attrib_attrs,
 
+	.default_compl_type		= TARGET_QUEUE_COMPL,
 	.default_submit_type		= TARGET_DIRECT_SUBMIT,
 	.direct_submit_supp		= 1,
 };
@@ -1881,6 +1882,7 @@ static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
 
 	.tfc_wwn_attrs			= tcm_qla2xxx_wwn_attrs,
 
+	.default_compl_type		= TARGET_QUEUE_COMPL,
 	.default_submit_type		= TARGET_DIRECT_SUBMIT,
 	.direct_submit_supp		= 1,
 };
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index d3a8cd4166f9..6e8c7a42603e 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -13,6 +13,7 @@
 #include <linux/bitops.h>
 #include <linux/blkdev.h>
 #include <linux/completion.h>
+#include <linux/ctype.h>
 #include <linux/kernel.h>
 #include <linux/export.h>
 #include <linux/init.h>
@@ -3460,6 +3461,52 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
 }
 EXPORT_SYMBOL(scsi_vpd_lun_id);
 
+/**
+ * scsi_vpd_lun_serial - return a unique device serial number
+ * @sdev: SCSI device
+ * @sn:   buffer for the serial number
+ * @sn_size: size of the buffer
+ *
+ * Copies the device serial number into @sn based on the information in
+ * the VPD page 0x80 of the device. The string will be null terminated
+ * and have leading and trailing whitespace stripped.
+ *
+ * Returns the length of the serial number or error on failure.
+ */
+int scsi_vpd_lun_serial(struct scsi_device *sdev, char *sn, size_t sn_size)
+{
+	const struct scsi_vpd *vpd_pg80;
+	const unsigned char *d;
+	int len;
+
+	guard(rcu)();
+	vpd_pg80 = rcu_dereference(sdev->vpd_pg80);
+	if (!vpd_pg80)
+		return -ENXIO;
+
+	len = vpd_pg80->len - 4;
+	d = vpd_pg80->data + 4;
+
+	/* Skip leading spaces */
+	while (len > 0 && isspace(*d)) {
+		len--;
+		d++;
+	}
+
+	/* Skip trailing spaces */
+	while (len > 0 && isspace(d[len - 1]))
+		len--;
+
+	if (sn_size < len + 1)
+		return -EINVAL;
+
+	memcpy(sn, d, len);
+	sn[len] = '\0';
+
+	return len;
+}
+EXPORT_SYMBOL(scsi_vpd_lun_serial);
+
 /**
  * scsi_vpd_tpg_id - return a target port group identifier
  * @sdev: SCSI device
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 60c06fa4ec32..efcaf85ff699 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -1943,7 +1943,6 @@ static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
 static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
 {
 	struct async_scan_data *data = NULL;
-	unsigned long flags;
 
 	if (strncmp(scsi_scan_type, "sync", 4) == 0)
 		return NULL;
@@ -1962,9 +1961,7 @@ static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
 		goto err;
 	init_completion(&data->prev_finished);
 
-	spin_lock_irqsave(shost->host_lock, flags);
-	shost->async_scan = 1;
-	spin_unlock_irqrestore(shost->host_lock, flags);
+	shost->async_scan = true;
 	mutex_unlock(&shost->scan_mutex);
 
 	spin_lock(&async_scan_lock);
@@ -1992,7 +1989,6 @@ static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
 static void scsi_finish_async_scan(struct async_scan_data *data)
 {
 	struct Scsi_Host *shost;
-	unsigned long flags;
 
 	if (!data)
 		return;
@@ -2012,9 +2008,7 @@ static void scsi_finish_async_scan(struct async_scan_data *data)
 
 	scsi_sysfs_add_devices(shost);
 
-	spin_lock_irqsave(shost->host_lock, flags);
-	shost->async_scan = 0;
-	spin_unlock_irqrestore(shost->host_lock, flags);
+	shost->async_scan = false;
 
 	mutex_unlock(&shost->scan_mutex);
 
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 6b8c5c05f294..dfc3559e7e04 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1051,6 +1051,21 @@ sdev_show_wwid(struct device *dev, struct device_attribute *attr,
 }
 static DEVICE_ATTR(wwid, S_IRUGO, sdev_show_wwid, NULL);
 
+static ssize_t
+sdev_show_serial(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	ssize_t ret;
+
+	ret = scsi_vpd_lun_serial(sdev, buf, PAGE_SIZE - 1);
+	if (ret < 0)
+		return ret;
+
+	buf[ret] = '\n';
+	return ret + 1;
+}
+static DEVICE_ATTR(serial, S_IRUGO, sdev_show_serial, NULL);
+
 #define BLIST_FLAG_NAME(name)					\
 	[const_ilog2((__force __u64)BLIST_##name)] = #name
 static const char *const sdev_bflags_name[] = {
@@ -1295,6 +1310,7 @@ static struct attribute *scsi_sdev_attrs[] = {
 	&dev_attr_device_busy.attr,
 	&dev_attr_vendor.attr,
 	&dev_attr_model.attr,
+	&dev_attr_serial.attr,
 	&dev_attr_rev.attr,
 	&dev_attr_rescan.attr,
 	&dev_attr_delete.attr,
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 628a1d0a74ba..205877b1f8aa 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -107,8 +107,11 @@ static void sd_config_write_same(struct scsi_disk *sdkp,
 static void  sd_revalidate_disk(struct gendisk *);
 
 static DEFINE_IDA(sd_index_ida);
+static DEFINE_MUTEX(sd_mutex_lock);
 
 static mempool_t *sd_page_pool;
+static mempool_t *sd_large_page_pool;
+static atomic_t sd_large_page_pool_users = ATOMIC_INIT(0);
 static struct lock_class_key sd_bio_compl_lkclass;
 
 static const char *sd_cache_types[] = {
@@ -116,6 +119,33 @@ static const char *sd_cache_types[] = {
 	"write back, no read (daft)"
 };
 
+static int sd_large_pool_create(void)
+{
+	mutex_lock(&sd_mutex_lock);
+	if (!sd_large_page_pool) {
+		sd_large_page_pool = mempool_create_page_pool(
+			SD_MEMPOOL_SIZE, get_order(BLK_MAX_BLOCK_SIZE));
+		if (!sd_large_page_pool) {
+			printk(KERN_ERR "sd: can't create large page mempool\n");
+			mutex_unlock(&sd_mutex_lock);
+			return -ENOMEM;
+		}
+	}
+	atomic_inc(&sd_large_page_pool_users);
+	mutex_unlock(&sd_mutex_lock);
+	return 0;
+}
+
+static void sd_large_pool_destroy(void)
+{
+	mutex_lock(&sd_mutex_lock);
+	if (atomic_dec_and_test(&sd_large_page_pool_users)) {
+		mempool_destroy(sd_large_page_pool);
+		sd_large_page_pool = NULL;
+	}
+	mutex_unlock(&sd_mutex_lock);
+}
+
 static void sd_disable_discard(struct scsi_disk *sdkp)
 {
 	sdkp->provisioning_mode = SD_LBP_DISABLE;
@@ -928,14 +958,24 @@ static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd,
 	return protect;
 }
 
-static void *sd_set_special_bvec(struct request *rq, unsigned int data_len)
+static void *sd_set_special_bvec(struct scsi_cmnd *cmd, unsigned int data_len)
 {
 	struct page *page;
+	struct request *rq = scsi_cmd_to_rq(cmd);
+	struct scsi_device *sdp = cmd->device;
+	unsigned sector_size = sdp->sector_size;
+	unsigned int nr_pages = DIV_ROUND_UP(sector_size, PAGE_SIZE);
+	int n;
 
-	page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
+	if (sector_size > PAGE_SIZE)
+		page = mempool_alloc(sd_large_page_pool, GFP_ATOMIC);
+	else
+		page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
 	if (!page)
 		return NULL;
-	clear_highpage(page);
+
+	for (n = 0; n < nr_pages; n++)
+		clear_highpage(page + n);
 	bvec_set_page(&rq->special_vec, page, data_len, 0);
 	rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
 	return bvec_virt(&rq->special_vec);
@@ -951,7 +991,7 @@ static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
 	unsigned int data_len = 24;
 	char *buf;
 
-	buf = sd_set_special_bvec(rq, data_len);
+	buf = sd_set_special_bvec(cmd, data_len);
 	if (!buf)
 		return BLK_STS_RESOURCE;
 
@@ -1040,7 +1080,7 @@ static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
 	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
 	u32 data_len = sdp->sector_size;
 
-	if (!sd_set_special_bvec(rq, data_len))
+	if (!sd_set_special_bvec(cmd, data_len))
 		return BLK_STS_RESOURCE;
 
 	cmd->cmd_len = 16;
@@ -1067,7 +1107,7 @@ static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd,
 	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
 	u32 data_len = sdp->sector_size;
 
-	if (!sd_set_special_bvec(rq, data_len))
+	if (!sd_set_special_bvec(cmd, data_len))
 		return BLK_STS_RESOURCE;
 
 	cmd->cmd_len = 10;
@@ -1513,9 +1553,15 @@ static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
 static void sd_uninit_command(struct scsi_cmnd *SCpnt)
 {
 	struct request *rq = scsi_cmd_to_rq(SCpnt);
+	struct scsi_device *sdp = SCpnt->device;
+	unsigned sector_size = sdp->sector_size;
 
-	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
-		mempool_free(rq->special_vec.bv_page, sd_page_pool);
+	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
+		if (sector_size > PAGE_SIZE)
+			mempool_free(rq->special_vec.bv_page, sd_large_page_pool);
+		else
+			mempool_free(rq->special_vec.bv_page, sd_page_pool);
+	}
 }
 
 static bool sd_need_revalidate(struct gendisk *disk, struct scsi_disk *sdkp)
@@ -2912,10 +2958,7 @@ sd_read_capacity(struct scsi_disk *sdkp, struct queue_limits *lim,
 			  "Sector size 0 reported, assuming 512.\n");
 	}
 
-	if (sector_size != 512 &&
-	    sector_size != 1024 &&
-	    sector_size != 2048 &&
-	    sector_size != 4096) {
+	if (blk_validate_block_size(sector_size)) {
 		sd_printk(KERN_NOTICE, sdkp, "Unsupported sector size %d.\n",
 			  sector_size);
 		/*
@@ -4043,6 +4086,12 @@ static int sd_probe(struct scsi_device *sdp)
 	sdkp->max_medium_access_timeouts = SD_MAX_MEDIUM_TIMEOUTS;
 
 	sd_revalidate_disk(gd);
+	if (sdp->sector_size > PAGE_SIZE) {
+		if (sd_large_pool_create()) {
+			error = -ENOMEM;
+			goto out_free_index;
+		}
+	}
 
 	if (sdp->removable) {
 		gd->flags |= GENHD_FL_REMOVABLE;
@@ -4060,6 +4109,8 @@ static int sd_probe(struct scsi_device *sdp)
 	if (error) {
 		device_unregister(&sdkp->disk_dev);
 		put_disk(gd);
+		if (sdp->sector_size > PAGE_SIZE)
+			sd_large_pool_destroy();
 		goto out;
 	}
 
@@ -4212,6 +4263,9 @@ static void sd_remove(struct scsi_device *sdp)
 		sd_shutdown(sdp);
 
 	put_disk(sdkp->disk);
+
+	if (sdp->sector_size > PAGE_SIZE)
+		sd_large_pool_destroy();
 }
 
 static inline bool sd_do_start_stop(struct scsi_device *sdev, bool runtime)
@@ -4435,6 +4489,8 @@ static void __exit exit_sd(void)
 
 	scsi_unregister_driver(&sd_template);
 	mempool_destroy(sd_page_pool);
+	if (sd_large_page_pool)
+		mempool_destroy(sd_large_page_pool);
 
 	class_unregister(&sd_disk_class);
 
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 37bac49f30f0..2b4b2a1a8e44 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -81,14 +81,14 @@ static int sg_proc_init(void);
 
 #define SG_DEFAULT_TIMEOUT mult_frac(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ)
 
-static int sg_big_buff = SG_DEF_RESERVED_SIZE;
 /* N.B. This variable is readable and writeable via
-   /proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer
-   of this size (or less if there is not enough memory) will be reserved
-   for use by this file descriptor. [Deprecated usage: this variable is also
-   readable via /proc/sys/kernel/sg-big-buff if the sg driver is built into
-   the kernel (i.e. it is not a module).] */
-static int def_reserved_size = -1;	/* picks up init parameter */
+ * /proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer
+ * of this size (or less if there is not enough memory) will be reserved
+ * for use by this file descriptor.
+ */
+
+/* picks up init parameter */
+static int def_reserved_size = SG_DEF_RESERVED_SIZE;
 static int sg_allow_dio = SG_ALLOW_DIO_DEF;
 
 static int scatter_elem_sz = SG_SCATTER_SZ;
@@ -1623,10 +1623,35 @@ sg_remove_device(struct device *cl_dev)
 }
 
 module_param_named(scatter_elem_sz, scatter_elem_sz, int, S_IRUGO | S_IWUSR);
-module_param_named(def_reserved_size, def_reserved_size, int,
-		   S_IRUGO | S_IWUSR);
 module_param_named(allow_dio, sg_allow_dio, int, S_IRUGO | S_IWUSR);
 
+static int def_reserved_size_set(const char *val, const struct kernel_param *kp)
+{
+	int size, ret;
+
+	if (!val)
+		return -EINVAL;
+
+	ret = kstrtoint(val, 0, &size);
+	if (ret)
+		return ret;
+
+	/* limit to 1 MB */
+	if (size < 0 || size > 1048576)
+		return -ERANGE;
+
+	def_reserved_size = size;
+	return 0;
+}
+
+static const struct kernel_param_ops def_reserved_size_ops = {
+	.set	= def_reserved_size_set,
+	.get	= param_get_int,
+};
+
+module_param_cb(def_reserved_size, &def_reserved_size_ops, &def_reserved_size,
+		   S_IRUGO | S_IWUSR);
+
 MODULE_AUTHOR("Douglas Gilbert");
 MODULE_DESCRIPTION("SCSI generic (sg) driver");
 MODULE_LICENSE("GPL");
@@ -1638,35 +1663,6 @@ MODULE_PARM_DESC(scatter_elem_sz, "scatter gather element "
 MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd");
 MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))");
 
-#ifdef CONFIG_SYSCTL
-#include <linux/sysctl.h>
-
-static const struct ctl_table sg_sysctls[] = {
-	{
-		.procname	= "sg-big-buff",
-		.data		= &sg_big_buff,
-		.maxlen		= sizeof(int),
-		.mode		= 0444,
-		.proc_handler	= proc_dointvec,
-	},
-};
-
-static struct ctl_table_header *hdr;
-static void register_sg_sysctls(void)
-{
-	if (!hdr)
-		hdr = register_sysctl("kernel", sg_sysctls);
-}
-
-static void unregister_sg_sysctls(void)
-{
-	unregister_sysctl_table(hdr);
-}
-#else
-#define register_sg_sysctls() do { } while (0)
-#define unregister_sg_sysctls() do { } while (0)
-#endif /* CONFIG_SYSCTL */
-
 static int __init
 init_sg(void)
 {
@@ -1676,10 +1672,6 @@ init_sg(void)
 		scatter_elem_sz = PAGE_SIZE;
 		scatter_elem_sz_prev = scatter_elem_sz;
 	}
-	if (def_reserved_size >= 0)
-		sg_big_buff = def_reserved_size;
-	else
-		def_reserved_size = sg_big_buff;
 
 	rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), 
 				    SG_MAX_DEVS, "sg");
@@ -1697,7 +1689,6 @@ init_sg(void)
 		return 0;
 	}
 	class_unregister(&sg_sysfs_class);
-	register_sg_sysctls();
 err_out:
 	unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS);
 	return rc;
@@ -1706,7 +1697,6 @@ init_sg(void)
 static void __exit
 exit_sg(void)
 {
-	unregister_sg_sysctls();
 #ifdef CONFIG_SCSI_PROC_FS
 	remove_proc_subtree("scsi/sg", NULL);
 #endif				/* CONFIG_SCSI_PROC_FS */
@@ -2182,10 +2172,8 @@ sg_add_sfp(Sg_device * sdp)
 	write_unlock_irqrestore(&sdp->sfd_lock, iflags);
 	SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
 				      "sg_add_sfp: sfp=0x%p\n", sfp));
-	if (unlikely(sg_big_buff != def_reserved_size))
-		sg_big_buff = def_reserved_size;
 
-	bufflen = min_t(int, sg_big_buff,
+	bufflen = min_t(int, def_reserved_size,
 			max_sectors_bytes(sdp->device->request_queue));
 	sg_build_reserve(sfp, bufflen);
 	SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
@@ -2413,7 +2401,7 @@ sg_proc_write_adio(struct file *filp, const char __user *buffer,
 
 static int sg_proc_single_open_dressz(struct inode *inode, struct file *file)
 {
-	return single_open(file, sg_proc_seq_show_int, &sg_big_buff);
+	return single_open(file, sg_proc_seq_show_int, &def_reserved_size);
 }
 
 static ssize_t 
@@ -2430,7 +2418,7 @@ sg_proc_write_dressz(struct file *filp, const char __user *buffer,
 	if (err)
 		return err;
 	if (k <= 1048576) {	/* limit "big buff" to 1 MB */
-		sg_big_buff = k;
+		def_reserved_size = k;
 		return count;
 	}
 	return -ERANGE;
@@ -2603,7 +2591,7 @@ static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
 
 	if (it && (0 == it->index))
 		seq_printf(s, "max_active_device=%d  def_reserved_size=%d\n",
-			   (int)it->max, sg_big_buff);
+			   (int)it->max, def_reserved_size);
 
 	read_lock_irqsave(&sg_index_lock, iflags);
 	sdp = it ? sg_lookup_dev(it->index) : NULL;
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 0ed8558dad72..5fdaa71f0652 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -233,7 +233,6 @@ static void virtscsi_ctrl_done(struct virtqueue *vq)
 	virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free);
 };
 
-static void virtscsi_handle_event(struct work_struct *work);
 
 static int virtscsi_kick_event(struct virtio_scsi *vscsi,
 			       struct virtio_scsi_event_node *event_node)
@@ -242,7 +241,6 @@ static int virtscsi_kick_event(struct virtio_scsi *vscsi,
 	struct scatterlist sg;
 	unsigned long flags;
 
-	INIT_WORK(&event_node->work, virtscsi_handle_event);
 	sg_init_one(&sg, event_node->event, sizeof(struct virtio_scsi_event));
 
 	spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);
@@ -984,8 +982,10 @@ static int virtscsi_probe(struct virtio_device *vdev)
 
 	virtio_device_ready(vdev);
 
-	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
-		virtscsi_kick_event_all(vscsi);
+	for (int i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++)
+		INIT_WORK(&vscsi->event_list[i].work, virtscsi_handle_event);
+
+	virtscsi_kick_event_all(vscsi);
 
 	scsi_scan_host(shost);
 	return 0;
@@ -1002,8 +1002,7 @@ static void virtscsi_remove(struct virtio_device *vdev)
 	struct Scsi_Host *shost = virtio_scsi_host(vdev);
 	struct virtio_scsi *vscsi = shost_priv(shost);
 
-	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
-		virtscsi_cancel_event_work(vscsi);
+	virtscsi_cancel_event_work(vscsi);
 
 	scsi_remove_host(shost);
 	virtscsi_remove_vqs(vdev);
@@ -1029,8 +1028,7 @@ static int virtscsi_restore(struct virtio_device *vdev)
 
 	virtio_device_ready(vdev);
 
-	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
-		virtscsi_kick_event_all(vscsi);
+	virtscsi_kick_event_all(vscsi);
 
 	return err;
 }
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index efe8cdb20060..704ec94383c3 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -1591,6 +1591,7 @@ const struct target_core_fabric_ops iscsi_ops = {
 
 	.write_pending_must_be_called	= 1,
 
+	.default_compl_type		= TARGET_QUEUE_COMPL,
 	.default_submit_type		= TARGET_DIRECT_SUBMIT,
 	.direct_submit_supp		= 1,
 };
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index d668bd19fd4a..e3b61b88471a 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -1107,6 +1107,7 @@ static const struct target_core_fabric_ops loop_ops = {
 	.tfc_wwn_attrs			= tcm_loop_wwn_attrs,
 	.tfc_tpg_base_attrs		= tcm_loop_tpg_attrs,
 	.tfc_tpg_attrib_attrs		= tcm_loop_tpg_attrib_attrs,
+	.default_compl_type		= TARGET_QUEUE_COMPL,
 	.default_submit_type		= TARGET_QUEUE_SUBMIT,
 	.direct_submit_supp		= 0,
 };
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index ad1da7edbb08..896fc0f0379f 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -2278,6 +2278,7 @@ static const struct target_core_fabric_ops sbp_ops = {
 	.tfc_tpg_base_attrs		= sbp_tpg_base_attrs,
 	.tfc_tpg_attrib_attrs		= sbp_tpg_attrib_attrs,
 
+	.default_compl_type		= TARGET_QUEUE_COMPL,
 	.default_submit_type		= TARGET_DIRECT_SUBMIT,
 	.direct_submit_supp		= 1,
 };
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 17608ea39d5a..f514fa2e80dd 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -578,6 +578,7 @@ DEF_CONFIGFS_ATTRIB_SHOW(unmap_zeroes_data);
 DEF_CONFIGFS_ATTRIB_SHOW(max_write_same_len);
 DEF_CONFIGFS_ATTRIB_SHOW(emulate_rsoc);
 DEF_CONFIGFS_ATTRIB_SHOW(submit_type);
+DEF_CONFIGFS_ATTRIB_SHOW(complete_type);
 DEF_CONFIGFS_ATTRIB_SHOW(atomic_max_len);
 DEF_CONFIGFS_ATTRIB_SHOW(atomic_alignment);
 DEF_CONFIGFS_ATTRIB_SHOW(atomic_granularity);
@@ -1269,6 +1270,24 @@ static ssize_t submit_type_store(struct config_item *item, const char *page,
 	return count;
 }
 
+static ssize_t complete_type_store(struct config_item *item, const char *page,
+				   size_t count)
+{
+	struct se_dev_attrib *da = to_attrib(item);
+	int ret;
+	u8 val;
+
+	ret = kstrtou8(page, 0, &val);
+	if (ret < 0)
+		return ret;
+
+	if (val > TARGET_QUEUE_COMPL)
+		return -EINVAL;
+
+	da->complete_type = val;
+	return count;
+}
+
 CONFIGFS_ATTR(, emulate_model_alias);
 CONFIGFS_ATTR(, emulate_dpo);
 CONFIGFS_ATTR(, emulate_fua_write);
@@ -1305,6 +1324,7 @@ CONFIGFS_ATTR(, max_write_same_len);
 CONFIGFS_ATTR(, alua_support);
 CONFIGFS_ATTR(, pgr_support);
 CONFIGFS_ATTR(, submit_type);
+CONFIGFS_ATTR(, complete_type);
 CONFIGFS_ATTR_RO(, atomic_max_len);
 CONFIGFS_ATTR_RO(, atomic_alignment);
 CONFIGFS_ATTR_RO(, atomic_granularity);
@@ -1353,6 +1373,7 @@ struct configfs_attribute *sbc_attrib_attrs[] = {
 	&attr_pgr_support,
 	&attr_emulate_rsoc,
 	&attr_submit_type,
+	&attr_complete_type,
 	&attr_atomic_alignment,
 	&attr_atomic_max_len,
 	&attr_atomic_granularity,
@@ -1376,6 +1397,7 @@ struct configfs_attribute *passthrough_attrib_attrs[] = {
 	&attr_alua_support,
 	&attr_pgr_support,
 	&attr_submit_type,
+	&attr_complete_type,
 	NULL,
 };
 EXPORT_SYMBOL(passthrough_attrib_attrs);
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 74c6383f9eed..9db2201aa553 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -813,6 +813,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
 				DA_UNMAP_ZEROES_DATA_DEFAULT;
 	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
 	dev->dev_attrib.submit_type = TARGET_FABRIC_DEFAULT_SUBMIT;
+	dev->dev_attrib.complete_type = TARGET_FABRIC_DEFAULT_COMPL;
 
 	/* Skip allocating lun_stats since we can't export them. */
 	xcopy_lun = &dev->xcopy_lun;
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 331689b30f85..166dbf4c4061 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -1065,6 +1065,28 @@ target_fabric_wwn_cmd_completion_affinity_store(struct config_item *item,
 }
 CONFIGFS_ATTR(target_fabric_wwn_, cmd_completion_affinity);
 
+static ssize_t
+target_fabric_wwn_default_complete_type_show(struct config_item *item,
+					     char *page)
+{
+	struct se_wwn *wwn = container_of(to_config_group(item), struct se_wwn,
+					  param_group);
+	return sysfs_emit(page, "%u\n",
+			  wwn->wwn_tf->tf_ops->default_compl_type);
+}
+CONFIGFS_ATTR_RO(target_fabric_wwn_, default_complete_type);
+
+static ssize_t
+target_fabric_wwn_direct_complete_supported_show(struct config_item *item,
+						 char *page)
+{
+	struct se_wwn *wwn = container_of(to_config_group(item), struct se_wwn,
+					  param_group);
+	return sysfs_emit(page, "%u\n",
+			  wwn->wwn_tf->tf_ops->direct_compl_supp);
+}
+CONFIGFS_ATTR_RO(target_fabric_wwn_, direct_complete_supported);
+
 static ssize_t
 target_fabric_wwn_default_submit_type_show(struct config_item *item,
 					   char *page)
@@ -1089,6 +1111,8 @@ CONFIGFS_ATTR_RO(target_fabric_wwn_, direct_submit_supported);
 
 static struct configfs_attribute *target_fabric_wwn_param_attrs[] = {
 	&target_fabric_wwn_attr_cmd_completion_affinity,
+	&target_fabric_wwn_attr_default_complete_type,
+	&target_fabric_wwn_attr_direct_complete_supported,
 	&target_fabric_wwn_attr_default_submit_type,
 	&target_fabric_wwn_attr_direct_submit_supported,
 	NULL,
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index abe91dc8722e..21f5cb86d70c 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -1187,7 +1187,8 @@ sbc_execute_unmap(struct se_cmd *cmd)
 			goto err;
 		}
 
-		if (lba + range > dev->transport->get_blocks(dev) + 1) {
+		if (lba + range < lba ||
+		    lba + range > dev->transport->get_blocks(dev) + 1) {
 			ret = TCM_ADDRESS_OUT_OF_RANGE;
 			goto err;
 		}
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index a7330c4fedde..fad03a15c969 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -902,13 +902,59 @@ static bool target_cmd_interrupted(struct se_cmd *cmd)
 	return false;
 }
 
+static void target_complete(struct se_cmd *cmd, int success)
+{
+	struct se_wwn *wwn = cmd->se_sess->se_tpg->se_tpg_wwn;
+	struct se_dev_attrib *da;
+	u8 compl_type;
+	int cpu;
+
+	if (!wwn) {
+		cpu = cmd->cpuid;
+		goto queue_work;
+	}
+
+	da = &cmd->se_dev->dev_attrib;
+	if (da->complete_type == TARGET_FABRIC_DEFAULT_COMPL)
+		compl_type = wwn->wwn_tf->tf_ops->default_compl_type;
+	else if (da->complete_type == TARGET_DIRECT_COMPL &&
+		 wwn->wwn_tf->tf_ops->direct_compl_supp)
+		compl_type = TARGET_DIRECT_COMPL;
+	else
+		compl_type = TARGET_QUEUE_COMPL;
+
+	if (compl_type == TARGET_DIRECT_COMPL) {
+		/*
+		 * Failure handling and processing secondary stages of
+		 * complex commands can be too heavy to handle from the
+		 * fabric driver so always defer.
+		 */
+		if (success && !cmd->transport_complete_callback) {
+			target_complete_ok_work(&cmd->work);
+			return;
+		}
+
+		compl_type = TARGET_QUEUE_COMPL;
+	}
+
+queue_work:
+	INIT_WORK(&cmd->work, success ? target_complete_ok_work :
+		  target_complete_failure_work);
+
+	if (!wwn || wwn->cmd_compl_affinity == SE_COMPL_AFFINITY_CPUID)
+		cpu = cmd->cpuid;
+	else
+		cpu = wwn->cmd_compl_affinity;
+
+	queue_work_on(cpu, target_completion_wq, &cmd->work);
+}
+
 /* May be called from interrupt context so must not sleep. */
 void target_complete_cmd_with_sense(struct se_cmd *cmd, u8 scsi_status,
 				    sense_reason_t sense_reason)
 {
-	struct se_wwn *wwn = cmd->se_sess->se_tpg->se_tpg_wwn;
-	int success, cpu;
 	unsigned long flags;
+	int success;
 
 	if (target_cmd_interrupted(cmd))
 		return;
@@ -933,15 +979,7 @@ void target_complete_cmd_with_sense(struct se_cmd *cmd, u8 scsi_status,
 	cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
-	INIT_WORK(&cmd->work, success ? target_complete_ok_work :
-		  target_complete_failure_work);
-
-	if (!wwn || wwn->cmd_compl_affinity == SE_COMPL_AFFINITY_CPUID)
-		cpu = cmd->cpuid;
-	else
-		cpu = wwn->cmd_compl_affinity;
-
-	queue_work_on(cpu, target_completion_wq, &cmd->work);
+	target_complete(cmd, success);
 }
 EXPORT_SYMBOL(target_complete_cmd_with_sense);
 
@@ -1112,7 +1150,7 @@ void transport_dump_vpd_proto_id(
 	}
 
 	if (p_buf)
-		strncpy(p_buf, buf, p_buf_len);
+		strscpy(p_buf, buf, p_buf_len);
 	else
 		pr_debug("%s", buf);
 }
@@ -1162,7 +1200,7 @@ int transport_dump_vpd_assoc(
 	}
 
 	if (p_buf)
-		strncpy(p_buf, buf, p_buf_len);
+		strscpy(p_buf, buf, p_buf_len);
 	else
 		pr_debug("%s", buf);
 
@@ -1222,7 +1260,7 @@ int transport_dump_vpd_ident_type(
 	if (p_buf) {
 		if (p_buf_len < strlen(buf)+1)
 			return -EINVAL;
-		strncpy(p_buf, buf, p_buf_len);
+		strscpy(p_buf, buf, p_buf_len);
 	} else {
 		pr_debug("%s", buf);
 	}
@@ -1276,7 +1314,7 @@ int transport_dump_vpd_ident(
 	}
 
 	if (p_buf)
-		strncpy(p_buf, buf, p_buf_len);
+		strscpy(p_buf, buf, p_buf_len);
 	else
 		pr_debug("%s", buf);
 
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 88cf1e5a5810..3920fb02d9fc 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -434,6 +434,7 @@ static const struct target_core_fabric_ops ft_fabric_ops = {
 	.tfc_wwn_attrs			= ft_wwn_attrs,
 	.tfc_tpg_nacl_base_attrs	= ft_nacl_base_attrs,
 
+	.default_compl_type		= TARGET_QUEUE_COMPL,
 	.default_submit_type		= TARGET_DIRECT_SUBMIT,
 	.direct_submit_supp		= 1,
 };
diff --git a/drivers/ufs/core/Makefile b/drivers/ufs/core/Makefile
index 51e1867e524e..ce7d16d2cf35 100644
--- a/drivers/ufs/core/Makefile
+++ b/drivers/ufs/core/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 
 obj-$(CONFIG_SCSI_UFSHCD)		+= ufshcd-core.o
-ufshcd-core-y				+= ufshcd.o ufs-sysfs.o ufs-mcq.o
+ufshcd-core-y				+= ufshcd.o ufs-sysfs.o ufs-mcq.o ufs-txeq.o
 ufshcd-core-$(CONFIG_RPMB)		+= ufs-rpmb.o
 ufshcd-core-$(CONFIG_DEBUG_FS)		+= ufs-debugfs.o
 ufshcd-core-$(CONFIG_SCSI_UFS_BSG)	+= ufs_bsg.o
diff --git a/drivers/ufs/core/ufs-debugfs.c b/drivers/ufs/core/ufs-debugfs.c
index e3baed6c70bd..e3dd81d6fe82 100644
--- a/drivers/ufs/core/ufs-debugfs.c
+++ b/drivers/ufs/core/ufs-debugfs.c
@@ -209,6 +209,265 @@ static const struct ufs_debugfs_attr ufs_attrs[] = {
 	{ }
 };
 
+static int ufs_tx_eq_params_show(struct seq_file *s, void *data)
+{
+	const char *file_name = s->file->f_path.dentry->d_name.name;
+	u32 gear = (u32)(uintptr_t)s->file->f_inode->i_private;
+	struct ufs_hba *hba = hba_from_file(s->file);
+	struct ufshcd_tx_eq_settings *settings;
+	struct ufs_pa_layer_attr *pwr_info;
+	struct ufshcd_tx_eq_params *params;
+	u32 rate = hba->pwr_info.hs_rate;
+	u32 num_lanes;
+	int lane;
+
+	if (!ufshcd_is_tx_eq_supported(hba))
+		return -EOPNOTSUPP;
+
+	if (gear < UFS_HS_G1 || gear > UFS_HS_GEAR_MAX) {
+		seq_printf(s, "Invalid gear selected: %u\n", gear);
+		return 0;
+	}
+
+	if (!hba->max_pwr_info.is_valid) {
+		seq_puts(s, "Max power info is invalid\n");
+		return 0;
+	}
+
+	pwr_info = &hba->max_pwr_info.info;
+	params = &hba->tx_eq_params[gear - 1];
+	if (!params->is_valid) {
+		seq_printf(s, "TX EQ params are invalid for HS-G%u, Rate-%s\n",
+			   gear, ufs_hs_rate_to_str(rate));
+		return 0;
+	}
+
+	if (strcmp(file_name, "host_tx_eq_params") == 0) {
+		settings = params->host;
+		num_lanes = pwr_info->lane_tx;
+		seq_printf(s, "Host TX EQ PreShoot Cap: 0x%02x, DeEmphasis Cap: 0x%02x\n",
+			   hba->host_preshoot_cap, hba->host_deemphasis_cap);
+	} else if (strcmp(file_name, "device_tx_eq_params") == 0) {
+		settings = params->device;
+		num_lanes = pwr_info->lane_rx;
+		seq_printf(s, "Device TX EQ PreShoot Cap: 0x%02x, DeEmphasis Cap: 0x%02x\n",
+			   hba->device_preshoot_cap, hba->device_deemphasis_cap);
+	} else {
+		return -ENOENT;
+	}
+
+	seq_printf(s, "TX EQ setting for HS-G%u, Rate-%s:\n", gear,
+		   ufs_hs_rate_to_str(rate));
+	for (lane = 0; lane < num_lanes; lane++)
+		seq_printf(s, "TX Lane %d - PreShoot: %d, DeEmphasis: %d, Pre-Coding %senabled\n",
+			   lane, settings[lane].preshoot,
+			   settings[lane].deemphasis,
+			   settings[lane].precode_en ? "" : "not ");
+
+	return 0;
+}
+
+static int ufs_tx_eq_params_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ufs_tx_eq_params_show, inode->i_private);
+}
+
+static const struct file_operations ufs_tx_eq_params_fops = {
+	.owner		= THIS_MODULE,
+	.open		= ufs_tx_eq_params_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static const struct ufs_debugfs_attr ufs_tx_eq_attrs[] = {
+	{ "host_tx_eq_params", 0400, &ufs_tx_eq_params_fops },
+	{ "device_tx_eq_params", 0400, &ufs_tx_eq_params_fops },
+	{ }
+};
+
+static int ufs_tx_eqtr_record_show(struct seq_file *s, void *data)
+{
+	const char *file_name = s->file->f_path.dentry->d_name.name;
+	u8 (*fom_array)[TX_HS_NUM_PRESHOOT][TX_HS_NUM_DEEMPHASIS];
+	u32 gear = (u32)(uintptr_t)s->file->f_inode->i_private;
+	unsigned long preshoot_bitmap, deemphasis_bitmap;
+	struct ufs_hba *hba = hba_from_file(s->file);
+	struct ufs_pa_layer_attr *pwr_info;
+	struct ufshcd_tx_eq_params *params;
+	struct ufshcd_tx_eqtr_record *rec;
+	u32 rate = hba->pwr_info.hs_rate;
+	u8 preshoot, deemphasis;
+	u32 num_lanes;
+	char name[32];
+	int lane;
+
+	if (!ufshcd_is_tx_eq_supported(hba))
+		return -EOPNOTSUPP;
+
+	if (gear < UFS_HS_G1 || gear > UFS_HS_GEAR_MAX) {
+		seq_printf(s, "Invalid gear selected: %u\n", gear);
+		return 0;
+	}
+
+	if (!hba->max_pwr_info.is_valid) {
+		seq_puts(s, "Max power info is invalid\n");
+		return 0;
+	}
+
+	pwr_info = &hba->max_pwr_info.info;
+	params = &hba->tx_eq_params[gear - 1];
+	if (!params->is_valid) {
+		seq_printf(s, "TX EQ params are invalid for HS-G%u, Rate-%s\n",
+			   gear, ufs_hs_rate_to_str(rate));
+		return 0;
+	}
+
+	rec = params->eqtr_record;
+	if (!rec || !rec->last_record_index) {
+		seq_printf(s, "No TX EQTR records found for HS-G%u, Rate-%s.\n",
+			   gear, ufs_hs_rate_to_str(rate));
+		return 0;
+	}
+
+	if (strcmp(file_name, "host_tx_eqtr_record") == 0) {
+		preshoot_bitmap = (hba->host_preshoot_cap << 0x1) | 0x1;
+		deemphasis_bitmap = (hba->host_deemphasis_cap << 0x1) | 0x1;
+		num_lanes = pwr_info->lane_tx;
+		fom_array = rec->host_fom;
+		snprintf(name, sizeof(name), "%s", "Host");
+	} else if (strcmp(file_name, "device_tx_eqtr_record") == 0) {
+		preshoot_bitmap = (hba->device_preshoot_cap << 0x1) | 0x1;
+		deemphasis_bitmap = (hba->device_deemphasis_cap << 0x1) | 0x1;
+		num_lanes = pwr_info->lane_rx;
+		fom_array = rec->device_fom;
+		snprintf(name, sizeof(name), "%s", "Device");
+	} else {
+		return -ENOENT;
+	}
+
+	seq_printf(s, "%s TX EQTR record summary -\n", name);
+	seq_printf(s, "Target Power Mode: HS-G%u, Rate-%s\n", gear,
+		   ufs_hs_rate_to_str(rate));
+	seq_printf(s, "Most recent record index: %d\n",
+		   rec->last_record_index);
+	seq_printf(s, "Most recent record timestamp: %llu us\n",
+		   ktime_to_us(rec->last_record_ts));
+
+	for (lane = 0; lane < num_lanes; lane++) {
+		seq_printf(s, "\nTX Lane %d FOM - %s\n", lane, "PreShoot\\DeEmphasis");
+		seq_puts(s, "\\");
+		/* Print DeEmphasis header as X-axis. */
+		for (deemphasis = 0; deemphasis < TX_HS_NUM_DEEMPHASIS; deemphasis++)
+			seq_printf(s, "%8d%s", deemphasis, " ");
+		seq_puts(s, "\n");
+		/* Print matrix rows with PreShoot as Y-axis. */
+		for (preshoot = 0; preshoot < TX_HS_NUM_PRESHOOT; preshoot++) {
+			seq_printf(s, "%d", preshoot);
+			for (deemphasis = 0; deemphasis < TX_HS_NUM_DEEMPHASIS; deemphasis++) {
+				if (test_bit(preshoot, &preshoot_bitmap) &&
+				    test_bit(deemphasis, &deemphasis_bitmap)) {
+					u8 fom = fom_array[lane][preshoot][deemphasis];
+					u8 fom_val = fom & RX_FOM_VALUE_MASK;
+					bool precode_en = fom & RX_FOM_PRECODING_EN_BIT;
+
+					if (ufshcd_is_txeq_presets_used(hba) &&
+					    !ufshcd_is_txeq_preset_selected(preshoot, deemphasis))
+						seq_printf(s, "%8s%s", "-", " ");
+					else
+						seq_printf(s, "%8u%s", fom_val,
+							   precode_en ? "*" : " ");
+				} else {
+					seq_printf(s, "%8s%s", "x", " ");
+				}
+			}
+			seq_puts(s, "\n");
+		}
+	}
+
+	return 0;
+}
+
+static int ufs_tx_eqtr_record_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ufs_tx_eqtr_record_show, inode->i_private);
+}
+
+static const struct file_operations ufs_tx_eqtr_record_fops = {
+	.owner		= THIS_MODULE,
+	.open		= ufs_tx_eqtr_record_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static ssize_t ufs_tx_eq_ctrl_write(struct file *file, const char __user *buf,
+				    size_t count, loff_t *ppos)
+{
+	u32 gear = (u32)(uintptr_t)file->f_inode->i_private;
+	struct ufs_hba *hba = hba_from_file(file);
+	char kbuf[32];
+	int ret;
+
+	if (count >= sizeof(kbuf))
+		return -EINVAL;
+
+	if (copy_from_user(kbuf, buf, count))
+		return -EFAULT;
+
+	if (!ufshcd_is_tx_eq_supported(hba))
+		return -EOPNOTSUPP;
+
+	if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
+	    !hba->max_pwr_info.is_valid)
+		return -EBUSY;
+
+	if (!hba->ufs_device_wlun)
+		return -ENODEV;
+
+	kbuf[count] = '\0';
+
+	if (sysfs_streq(kbuf, "retrain")) {
+		ret = ufs_debugfs_get_user_access(hba);
+		if (ret)
+			return ret;
+		ret = ufshcd_retrain_tx_eq(hba, gear);
+		ufs_debugfs_put_user_access(hba);
+	} else {
+		/* Unknown operation */
+		return -EINVAL;
+	}
+
+	return ret ? ret : count;
+}
+
+static int ufs_tx_eq_ctrl_show(struct seq_file *s, void *data)
+{
+	seq_puts(s, "write 'retrain' to retrain TX Equalization settings\n");
+	return 0;
+}
+
+static int ufs_tx_eq_ctrl_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ufs_tx_eq_ctrl_show, inode->i_private);
+}
+
+static const struct file_operations ufs_tx_eq_ctrl_fops = {
+	.owner		= THIS_MODULE,
+	.open		= ufs_tx_eq_ctrl_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.write		= ufs_tx_eq_ctrl_write,
+	.release	= single_release,
+};
+
+static const struct ufs_debugfs_attr ufs_tx_eqtr_attrs[] = {
+	{ "host_tx_eqtr_record", 0400, &ufs_tx_eqtr_record_fops },
+	{ "device_tx_eqtr_record", 0400, &ufs_tx_eqtr_record_fops },
+	{ "tx_eq_ctrl", 0600, &ufs_tx_eq_ctrl_fops },
+	{ }
+};
+
 void ufs_debugfs_hba_init(struct ufs_hba *hba)
 {
 	const struct ufs_debugfs_attr *attr;
@@ -230,6 +489,37 @@ void ufs_debugfs_hba_init(struct ufs_hba *hba)
 			    hba, &ee_usr_mask_fops);
 	debugfs_create_u32("exception_event_rate_limit_ms", 0600, hba->debugfs_root,
 			   &hba->debugfs_ee_rate_limit_ms);
+
+	if (!(hba->caps & UFSHCD_CAP_TX_EQUALIZATION))
+		return;
+
+	for (u32 gear = UFS_HS_G1; gear <= UFS_HS_GEAR_MAX; gear++) {
+		struct dentry *txeq_dir;
+		char name[32];
+
+		snprintf(name, sizeof(name), "tx_eq_hs_gear%d", gear);
+		txeq_dir = debugfs_create_dir(name, hba->debugfs_root);
+		if (IS_ERR_OR_NULL(txeq_dir))
+			return;
+
+		d_inode(txeq_dir)->i_private = hba;
+
+		/* Create files for TX Equalization parameters */
+		for (attr = ufs_tx_eq_attrs; attr->name; attr++)
+			debugfs_create_file(attr->name, attr->mode, txeq_dir,
+					    (void *)(uintptr_t)gear,
+					    attr->fops);
+
+		/* TX EQTR is supported for HS-G4 and higher Gears */
+		if (gear < UFS_HS_G4)
+			continue;
+
+		/* Create files for TX EQTR related attributes */
+		for (attr = ufs_tx_eqtr_attrs; attr->name; attr++)
+			debugfs_create_file(attr->name, attr->mode, txeq_dir,
+					    (void *)(uintptr_t)gear,
+					    attr->fops);
+	}
 }
 
 void ufs_debugfs_hba_exit(struct ufs_hba *hba)
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index 18a95b728633..1b3062577945 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -31,7 +31,8 @@
 
 #define UFSHCD_ENABLE_MCQ_INTRS	(UTP_TASK_REQ_COMPL |\
 				 UFSHCD_ERROR_MASK |\
-				 MCQ_CQ_EVENT_STATUS)
+				 MCQ_CQ_EVENT_STATUS |\
+				 MCQ_IAG_EVENT_STATUS)
 
 /* Max mcq register polling time in microseconds */
 #define MCQ_POLL_US 500000
@@ -272,14 +273,29 @@ void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i)
 }
 EXPORT_SYMBOL_GPL(ufshcd_mcq_write_cqis);
 
+u32 ufshcd_mcq_read_mcqiacr(struct ufs_hba *hba, int i)
+{
+	return readl(mcq_opr_base(hba, OPR_CQIS, i) + REG_MCQIACR);
+}
+
+void ufshcd_mcq_write_mcqiacr(struct ufs_hba *hba, u32 val, int i)
+{
+	writel(val, mcq_opr_base(hba, OPR_CQIS, i) + REG_MCQIACR);
+}
+
 /*
- * Current MCQ specification doesn't provide a Task Tag or its equivalent in
+ * UFSHCI 4.0 MCQ specification doesn't provide a Task Tag or its equivalent in
  * the Completion Queue Entry. Find the Task Tag using an indirect method.
+ * UFSHCI 4.1 and above can directly return the Task Tag in the Completion Queue
+ * Entry.
  */
 static int ufshcd_mcq_get_tag(struct ufs_hba *hba, struct cq_entry *cqe)
 {
 	u64 addr;
 
+	if (hba->ufs_version >= ufshci_version(4, 1))
+		return cqe->task_tag;
+
 	/* sizeof(struct utp_transfer_cmd_desc) must be a multiple of 128 */
 	BUILD_BUG_ON(sizeof(struct utp_transfer_cmd_desc) & GENMASK(6, 0));
 
@@ -301,6 +317,8 @@ static void ufshcd_mcq_process_cqe(struct ufs_hba *hba,
 		ufshcd_compl_one_cqe(hba, tag, cqe);
 		/* After processed the cqe, mark it empty (invalid) entry */
 		cqe->command_desc_base_addr = 0;
+	} else {
+		dev_err(hba->dev, "Abnormal CQ entry!\n");
 	}
 }
 
diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c
index 384d958615d7..99af3c73f1af 100644
--- a/drivers/ufs/core/ufs-sysfs.c
+++ b/drivers/ufs/core/ufs-sysfs.c
@@ -605,6 +605,34 @@ static ssize_t device_lvl_exception_id_show(struct device *dev,
 	return sysfs_emit(buf, "%llu\n", exception_id);
 }
 
+static ssize_t dme_qos_notification_show(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+
+	return sysfs_emit(buf, "0x%x\n", atomic_read(&hba->dme_qos_notification));
+}
+
+static ssize_t dme_qos_notification_store(struct device *dev,
+					  struct device_attribute *attr,
+					  const char *buf, size_t count)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	unsigned int value;
+
+	if (kstrtouint(buf, 0, &value))
+		return -EINVAL;
+
+	/* the only supported usecase is to reset the dme_qos_notification */
+	if (value)
+		return -EINVAL;
+
+	atomic_set(&hba->dme_qos_notification, 0);
+
+	return count;
+}
+
 static DEVICE_ATTR_RW(rpm_lvl);
 static DEVICE_ATTR_RO(rpm_target_dev_state);
 static DEVICE_ATTR_RO(rpm_target_link_state);
@@ -621,6 +649,7 @@ static DEVICE_ATTR_RW(pm_qos_enable);
 static DEVICE_ATTR_RO(critical_health);
 static DEVICE_ATTR_RW(device_lvl_exception_count);
 static DEVICE_ATTR_RO(device_lvl_exception_id);
+static DEVICE_ATTR_RW(dme_qos_notification);
 
 static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
 	&dev_attr_rpm_lvl.attr,
@@ -639,6 +668,7 @@ static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
 	&dev_attr_critical_health.attr,
 	&dev_attr_device_lvl_exception_count.attr,
 	&dev_attr_device_lvl_exception_id.attr,
+	&dev_attr_dme_qos_notification.attr,
 	NULL
 };
 
diff --git a/drivers/ufs/core/ufs-txeq.c b/drivers/ufs/core/ufs-txeq.c
new file mode 100644
index 000000000000..b2dc89124353
--- /dev/null
+++ b/drivers/ufs/core/ufs-txeq.c
@@ -0,0 +1,1293 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2026 Qualcomm Technologies, Inc.
+ *
+ * Author:
+ *	Can Guo <can.guo@oss.qualcomm.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <ufs/ufshcd.h>
+#include <ufs/unipro.h>
+#include "ufshcd-priv.h"
+
+static bool use_adaptive_txeq;
+module_param(use_adaptive_txeq, bool, 0644);
+MODULE_PARM_DESC(use_adaptive_txeq, "Find and apply optimal TX Equalization settings before changing Power Mode (default: false)");
+
+static int txeq_gear_set(const char *val, const struct kernel_param *kp)
+{
+	return param_set_uint_minmax(val, kp, UFS_HS_G1, UFS_HS_GEAR_MAX);
+}
+
+static const struct kernel_param_ops txeq_gear_ops = {
+	.set = txeq_gear_set,
+	.get = param_get_uint,
+};
+
+static unsigned int adaptive_txeq_gear = UFS_HS_G6;
+module_param_cb(adaptive_txeq_gear, &txeq_gear_ops, &adaptive_txeq_gear, 0644);
+MODULE_PARM_DESC(adaptive_txeq_gear, "For HS-Gear[n] and above, adaptive txeq shall be used");
+
+static bool use_txeq_presets;
+module_param(use_txeq_presets, bool, 0644);
+MODULE_PARM_DESC(use_txeq_presets, "Use only the 8 TX Equalization Presets (pre-defined Pre-Shoot & De-Emphasis combinations) for TX EQTR (default: false)");
+
+static bool txeq_presets_selected[UFS_TX_EQ_PRESET_MAX] = {[0 ... (UFS_TX_EQ_PRESET_MAX - 1)] = 1};
+module_param_array(txeq_presets_selected, bool, NULL, 0644);
+MODULE_PARM_DESC(txeq_presets_selected, "Use only the selected Presets out of the 8 TX Equalization Presets for TX EQTR");
+
+/*
+ * ufs_tx_eq_preset - Table of minimum required list of presets.
+ *
+ * A HS-G6 capable M-TX shall support the presets defined in M-PHY v6.0 spec.
+ * Preset	Pre-Shoot(dB)	De-Emphasis(dB)
+ * P0		0.0		0.0
+ * P1		0.0		0.8
+ * P2		0.0		1.6
+ * P3		0.8		0.0
+ * P4		1.6		0.0
+ * P5		0.8		0.8
+ * P6		0.8		1.6
+ * P7		1.6		0.8
+ */
+static const struct __ufs_tx_eq_preset {
+	u8 preshoot;
+	u8 deemphasis;
+} ufs_tx_eq_preset[UFS_TX_EQ_PRESET_MAX] = {
+	[UFS_TX_EQ_PRESET_P0] = {UFS_TX_HS_PRESHOOT_DB_0P0, UFS_TX_HS_DEEMPHASIS_DB_0P0},
+	[UFS_TX_EQ_PRESET_P1] = {UFS_TX_HS_PRESHOOT_DB_0P0, UFS_TX_HS_DEEMPHASIS_DB_0P8},
+	[UFS_TX_EQ_PRESET_P2] = {UFS_TX_HS_PRESHOOT_DB_0P0, UFS_TX_HS_DEEMPHASIS_DB_1P6},
+	[UFS_TX_EQ_PRESET_P3] = {UFS_TX_HS_PRESHOOT_DB_0P8, UFS_TX_HS_DEEMPHASIS_DB_0P0},
+	[UFS_TX_EQ_PRESET_P4] = {UFS_TX_HS_PRESHOOT_DB_1P6, UFS_TX_HS_DEEMPHASIS_DB_0P0},
+	[UFS_TX_EQ_PRESET_P5] = {UFS_TX_HS_PRESHOOT_DB_0P8, UFS_TX_HS_DEEMPHASIS_DB_0P8},
+	[UFS_TX_EQ_PRESET_P6] = {UFS_TX_HS_PRESHOOT_DB_0P8, UFS_TX_HS_DEEMPHASIS_DB_1P6},
+	[UFS_TX_EQ_PRESET_P7] = {UFS_TX_HS_PRESHOOT_DB_1P6, UFS_TX_HS_DEEMPHASIS_DB_0P8},
+};
+
+/*
+ * pa_peer_rx_adapt_initial - Table of UniPro PA_PeerRxHSGnAdaptInitial
+ * attribute IDs for High Speed (HS) Gears.
+ *
+ * This table maps HS Gears to their respective UniPro PA_PeerRxHSGnAdaptInitial
+ * attribute IDs. Entries for Gears 1-3 are 0 (unsupported).
+ */
+static const u32 pa_peer_rx_adapt_initial[UFS_HS_GEAR_MAX] = {
+	0,
+	0,
+	0,
+	PA_PEERRXHSG4ADAPTINITIAL,
+	PA_PEERRXHSG5ADAPTINITIAL,
+	PA_PEERRXHSG6ADAPTINITIALL0L3
+};
+
+/*
+ * rx_adapt_initial_cap - Table of M-PHY RX_HS_Gn_ADAPT_INITIAL_Capability
+ * attribute IDs for High Speed (HS) Gears.
+ *
+ * This table maps HS Gears to their respective M-PHY
+ * RX_HS_Gn_ADAPT_INITIAL_Capability attribute IDs. Entries for Gears 1-3 are 0
+ * (unsupported).
+ */
+static const u32 rx_adapt_initial_cap[UFS_HS_GEAR_MAX] = {
+	0,
+	0,
+	0,
+	RX_HS_G4_ADAPT_INITIAL_CAP,
+	RX_HS_G5_ADAPT_INITIAL_CAP,
+	RX_HS_G6_ADAPT_INITIAL_CAP
+};
+
+/*
+ * pa_tx_eq_setting - Table of UniPro PA_TxEQGnSetting attribute IDs for High
+ * Speed (HS) Gears.
+ *
+ * This table maps HS Gears to their respective UniPro PA_TxEQGnSetting
+ * attribute IDs.
+ */
+static const u32 pa_tx_eq_setting[UFS_HS_GEAR_MAX] = {
+	PA_TXEQG1SETTING,
+	PA_TXEQG2SETTING,
+	PA_TXEQG3SETTING,
+	PA_TXEQG4SETTING,
+	PA_TXEQG5SETTING,
+	PA_TXEQG6SETTING
+};
+
+/**
+ * ufshcd_configure_precoding - Configure Pre-Coding for all active lanes
+ * @hba: per adapter instance
+ * @params: TX EQ parameters data structure
+ *
+ * Bit[7] in RX_FOM indicates that the receiver needs to enable Pre-Coding when
+ * set. Pre-Coding must be enabled on both the transmitter and receiver to
+ * ensure proper operation.
+ *
+ * Returns 0 on success, non-zero error code otherwise
+ */
+static int ufshcd_configure_precoding(struct ufs_hba *hba,
+				      struct ufshcd_tx_eq_params *params)
+{
+	struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
+	u32 local_precode_en = 0;
+	u32 peer_precode_en = 0;
+	int lane, ret;
+
+	/* Enable Pre-Coding for Host's TX & Device's RX pair */
+	for (lane = 0; lane < pwr_info->lane_tx; lane++) {
+		if (params->host[lane].precode_en) {
+			local_precode_en |= PRECODEEN_TX_BIT(lane);
+			peer_precode_en |= PRECODEEN_RX_BIT(lane);
+		}
+	}
+
+	/* Enable Pre-Coding for Device's TX & Host's RX pair */
+	for (lane = 0; lane < pwr_info->lane_rx; lane++) {
+		if (params->device[lane].precode_en) {
+			peer_precode_en |= PRECODEEN_TX_BIT(lane);
+			local_precode_en |= PRECODEEN_RX_BIT(lane);
+		}
+	}
+
+	if (!local_precode_en && !peer_precode_en) {
+		dev_dbg(hba->dev, "Pre-Coding is not required for Host and Device\n");
+		return 0;
+	}
+
+	/* Set local PA_PreCodeEn */
+	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PRECODEEN), local_precode_en);
+	if (ret) {
+		dev_err(hba->dev, "Failed to set local PA_PreCodeEn: %d\n", ret);
+		return ret;
+	}
+
+	/* Set peer PA_PreCodeEn */
+	ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_PRECODEEN), peer_precode_en);
+	if (ret) {
+		dev_err(hba->dev, "Failed to set peer PA_PreCodeEn: %d\n", ret);
+		return ret;
+	}
+
+	dev_dbg(hba->dev, "Local PA_PreCodeEn: 0x%02x, Peer PA_PreCodeEn: 0x%02x\n",
+		local_precode_en, peer_precode_en);
+
+	return 0;
+}
+
+void ufshcd_print_tx_eq_params(struct ufs_hba *hba)
+{
+	struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
+	struct ufshcd_tx_eq_params *params;
+	u32 gear = hba->pwr_info.gear_tx;
+	int lane;
+
+	if (!ufshcd_is_tx_eq_supported(hba))
+		return;
+
+	if (gear < UFS_HS_G1 || gear > UFS_HS_GEAR_MAX)
+		return;
+
+	params = &hba->tx_eq_params[gear - 1];
+	if (!params->is_valid || !params->is_applied)
+		return;
+
+	for (lane = 0; lane < pwr_info->lane_tx; lane++)
+		dev_dbg(hba->dev, "Host TX Lane %d: PreShoot %u, DeEmphasis %u, FOM %u, PreCodeEn %d\n",
+			lane, params->host[lane].preshoot,
+			params->host[lane].deemphasis,
+			params->host[lane].fom_val,
+			params->host[lane].precode_en);
+
+	for (lane = 0; lane < pwr_info->lane_rx; lane++)
+		dev_dbg(hba->dev, "Device TX Lane %d: PreShoot %u, DeEmphasis %u, FOM %u, PreCodeEn %d\n",
+			lane, params->device[lane].preshoot,
+			params->device[lane].deemphasis,
+			params->device[lane].fom_val,
+			params->device[lane].precode_en);
+}
+
+static inline u32
+ufshcd_compose_tx_eq_setting(struct ufshcd_tx_eq_settings *settings,
+			     int num_lanes)
+{
+	u32 setting = 0;
+	int lane;
+
+	for (lane = 0; lane < num_lanes; lane++, settings++) {
+		setting |= TX_HS_PRESHOOT_BITS(lane, settings->preshoot);
+		setting |= TX_HS_DEEMPHASIS_BITS(lane, settings->deemphasis);
+	}
+
+	return setting;
+}
+
+/**
+ * ufshcd_apply_tx_eq_settings - Apply TX Equalization settings for target gear
+ * @hba: per adapter instance
+ * @params: TX EQ parameters data structure
+ * @gear: target gear
+ *
+ * Returns 0 on success, negative error code otherwise
+ */
+int ufshcd_apply_tx_eq_settings(struct ufs_hba *hba,
+				struct ufshcd_tx_eq_params *params, u32 gear)
+{
+	struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
+	u32 setting;
+	int ret;
+
+	/* Compose settings for Host's TX Lanes */
+	setting = ufshcd_compose_tx_eq_setting(params->host, pwr_info->lane_tx);
+	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(pa_tx_eq_setting[gear - 1]), setting);
+	if (ret)
+		return ret;
+
+	/* Compose settings for Device's TX Lanes */
+	setting = ufshcd_compose_tx_eq_setting(params->device, pwr_info->lane_rx);
+	ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(pa_tx_eq_setting[gear - 1]), setting);
+	if (ret)
+		return ret;
+
+	/* Configure Pre-Coding */
+	if (gear >= UFS_HS_G6) {
+		ret = ufshcd_configure_precoding(hba, params);
+		if (ret) {
+			dev_err(hba->dev, "Failed to configure pre-coding: %d\n", ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ufshcd_apply_tx_eq_settings);
+
+/**
+ * ufshcd_evaluate_tx_eqtr_fom - Evaluate TX EQTR FOM results
+ * @hba: per adapter instance
+ * @pwr_mode: target power mode containing gear and rate information
+ * @eqtr_data: TX EQTR data structure
+ * @h_iter: host TX EQTR iterator data structure
+ * @d_iter: device TX EQTR iterator data structure
+ *
+ * Evaluate TX EQTR FOM results, update host and device TX EQTR data accordingly
+ * if FOM have been improved compared to previous iteration, and record TX EQTR
+ * FOM results.
+ */
+static void ufshcd_evaluate_tx_eqtr_fom(struct ufs_hba *hba,
+					struct ufs_pa_layer_attr *pwr_mode,
+					struct ufshcd_tx_eqtr_data *eqtr_data,
+					struct tx_eqtr_iter *h_iter,
+					struct tx_eqtr_iter *d_iter)
+{
+	u8 preshoot, deemphasis, fom_value;
+	bool precode_en;
+	int lane;
+
+	/*
+	 * Host side: evaluated only when the host iterator was updated this
+	 * round (is_updated is loop-invariant; it merely gates the loop).
+	 */
+	for (lane = 0; h_iter->is_updated && lane < pwr_mode->lane_tx; lane++) {
+		preshoot = h_iter->preshoot;
+		deemphasis = h_iter->deemphasis;
+		/* Raw FOM byte carries both the value and the precoding flag */
+		fom_value = h_iter->fom[lane] & RX_FOM_VALUE_MASK;
+		precode_en = h_iter->fom[lane] & RX_FOM_PRECODING_EN_BIT;
+
+		/* Record host TX EQTR FOM */
+		eqtr_data->host_fom[lane][preshoot][deemphasis] = h_iter->fom[lane];
+
+		/* Check if FOM has been improved for host's TX Lanes */
+		if (fom_value > eqtr_data->host[lane].fom_val) {
+			eqtr_data->host[lane].preshoot = preshoot;
+			eqtr_data->host[lane].deemphasis = deemphasis;
+			eqtr_data->host[lane].fom_val = fom_value;
+			eqtr_data->host[lane].precode_en = precode_en;
+		}
+
+		dev_dbg(hba->dev, "TX EQTR: Host TX Lane %d: PreShoot %u, DeEmphasis %u, FOM value %u, PreCodeEn %d\n",
+			lane, preshoot, deemphasis, fom_value, precode_en);
+	}
+
+	/* Device side: same evaluation, gated by the device iterator. */
+	for (lane = 0; d_iter->is_updated && lane < pwr_mode->lane_rx; lane++) {
+		preshoot = d_iter->preshoot;
+		deemphasis = d_iter->deemphasis;
+		fom_value = d_iter->fom[lane] & RX_FOM_VALUE_MASK;
+		precode_en = d_iter->fom[lane] & RX_FOM_PRECODING_EN_BIT;
+
+		/* Record device TX EQTR FOM */
+		eqtr_data->device_fom[lane][preshoot][deemphasis] = d_iter->fom[lane];
+
+		/* Check if FOM has been improved for Device's TX Lanes */
+		if (fom_value > eqtr_data->device[lane].fom_val) {
+			eqtr_data->device[lane].preshoot = preshoot;
+			eqtr_data->device[lane].deemphasis = deemphasis;
+			eqtr_data->device[lane].fom_val = fom_value;
+			eqtr_data->device[lane].precode_en = precode_en;
+		}
+
+		dev_dbg(hba->dev, "TX EQTR: Device TX Lane %d: PreShoot %u, DeEmphasis %u, FOM value %u, PreCodeEn %d\n",
+			lane, preshoot, deemphasis, fom_value, precode_en);
+	}
+}
+
+/**
+ * ufshcd_get_rx_fom - Get Figure of Merit (FOM) for both sides
+ * @hba: per adapter instance
+ * @pwr_mode: target power mode containing gear and rate information
+ * @h_iter: host TX EQTR iterator data structure
+ * @d_iter: device TX EQTR iterator data structure
+ *
+ * The FOM for each host TX lane is read from the device's (peer's) RX_FOM,
+ * and vice versa. Only the low byte of each attribute is kept.
+ *
+ * Returns 0 on success, negative error code otherwise
+ */
+static int ufshcd_get_rx_fom(struct ufs_hba *hba,
+			     struct ufs_pa_layer_attr *pwr_mode,
+			     struct tx_eqtr_iter *h_iter,
+			     struct tx_eqtr_iter *d_iter)
+{
+	int lane, ret;
+	u32 fom;
+
+	/* Get FOM of host's TX lanes from device's RX_FOM. */
+	for (lane = 0; lane < pwr_mode->lane_tx; lane++) {
+		ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB_SEL(RX_FOM,
+					  UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)),
+					  &fom);
+		if (ret)
+			return ret;
+
+		h_iter->fom[lane] = (u8)fom;
+	}
+
+	/* Get FOM of device's TX lanes from host's RX_FOM. */
+	for (lane = 0; lane < pwr_mode->lane_rx; lane++) {
+		ret = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(RX_FOM,
+				     UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)),
+				     &fom);
+		if (ret)
+			return ret;
+
+		d_iter->fom[lane] = (u8)fom;
+	}
+
+	/* Variant driver may adjust or augment the FOM readings via vops. */
+	ret = ufshcd_vops_get_rx_fom(hba, pwr_mode, h_iter, d_iter);
+	if (ret)
+		dev_err(hba->dev, "Failed to get FOM via vops: %d\n", ret);
+
+	return ret;
+}
+
+/**
+ * ufshcd_is_txeq_presets_used - Check whether TX EQ presets are in use
+ * @hba: per adapter instance (currently unused)
+ *
+ * Returns the module-level use_txeq_presets flag.
+ */
+bool ufshcd_is_txeq_presets_used(struct ufs_hba *hba)
+{
+	return use_txeq_presets;
+}
+
+/**
+ * ufshcd_is_txeq_preset_selected - Check a PreShoot/DeEmphasis pair against presets
+ * @preshoot: PreShoot value
+ * @deemphasis: DeEmphasis value
+ *
+ * Returns true if the given combination matches any preset that is both
+ * selected (txeq_presets_selected) and defined in ufs_tx_eq_preset.
+ */
+bool ufshcd_is_txeq_preset_selected(u8 preshoot, u8 deemphasis)
+{
+	int i;
+
+	for (i = 0; i < UFS_TX_EQ_PRESET_MAX; i++) {
+		/* Skip presets that are not selected */
+		if (!txeq_presets_selected[i])
+			continue;
+
+		if (preshoot == ufs_tx_eq_preset[i].preshoot &&
+		    deemphasis == ufs_tx_eq_preset[i].deemphasis)
+			return true;
+	}
+
+	return false;
+}
+
+/**
+ * tx_eqtr_iter_try_update - Try to update a TX EQTR iterator
+ * @iter: TX EQTR iterator data structure
+ * @preshoot: PreShoot value
+ * @deemphasis: DeEmphasis value
+ *
+ * This function validates whether the provided PreShoot and DeEmphasis
+ * combination can be used or not. If yes, it updates the TX EQTR iterator with
+ * the provided PreShoot and DeEmphasis and sets the is_updated flag to
+ * indicate the iterator has been updated. Otherwise is_updated is cleared.
+ */
+static void tx_eqtr_iter_try_update(struct tx_eqtr_iter *iter,
+				    u8 preshoot, u8 deemphasis)
+{
+	/*
+	 * Reject the combination if either value is unsupported per the
+	 * capability bitmaps, or if presets are in use and this combination
+	 * is not among the selected presets.
+	 */
+	if (!test_bit(preshoot, &iter->preshoot_bitmap) ||
+	    !test_bit(deemphasis, &iter->deemphasis_bitmap) ||
+	    (use_txeq_presets && !ufshcd_is_txeq_preset_selected(preshoot, deemphasis))) {
+		iter->is_updated = false;
+		return;
+	}
+
+	iter->preshoot = preshoot;
+	iter->deemphasis = deemphasis;
+	iter->is_updated = true;
+}
+
+/**
+ * tx_eqtr_iter_update() - Update host and device TX EQTR iterators
+ * @preshoot: PreShoot value
+ * @deemphasis: DeEmphasis value
+ * @h_iter: Host TX EQTR iterator data structure
+ * @d_iter: Device TX EQTR iterator data structure
+ *
+ * Updates host and device TX Equalization training iterators with the
+ * provided PreShoot and DeEmphasis.
+ *
+ * Return: true if host and/or device TX Equalization training iterator has
+ * been updated to the provided PreShoot and DeEmphasis, false otherwise.
+ */
+static bool tx_eqtr_iter_update(u8 preshoot, u8 deemphasis,
+				struct tx_eqtr_iter *h_iter,
+				struct tx_eqtr_iter *d_iter)
+{
+	tx_eqtr_iter_try_update(h_iter, preshoot, deemphasis);
+	tx_eqtr_iter_try_update(d_iter, preshoot, deemphasis);
+
+	/* Either side accepting the combination makes the round worth running */
+	return h_iter->is_updated || d_iter->is_updated;
+}
+
+/**
+ * ufshcd_tx_eqtr_iter_init - Initialize host and device TX EQTR iterators
+ * @hba: per adapter instance
+ * @h_iter: host TX EQTR iterator data structure
+ * @d_iter: device TX EQTR iterator data structure
+ *
+ * This function initializes the TX EQTR iterator structures for both host and
+ * device by reading their TX equalization capabilities. The capabilities are
+ * cached in the hba structure to avoid redundant DME operations in subsequent
+ * calls. In the TX EQTR procedure, the iterator structures are updated by
+ * tx_eqtr_iter_update() to systematically iterate through supported TX
+ * Equalization setting combinations.
+ *
+ * Returns 0 on success, negative error code otherwise
+ */
+static int ufshcd_tx_eqtr_iter_init(struct ufs_hba *hba,
+				    struct tx_eqtr_iter *h_iter,
+				    struct tx_eqtr_iter *d_iter)
+{
+	u32 cap;
+	int ret;
+
+	if (!hba->host_preshoot_cap) {
+		ret = ufshcd_dme_get(hba, UIC_ARG_MIB(TX_HS_PRESHOOT_SETTING_CAP), &cap);
+		if (ret)
+			return ret;
+
+		hba->host_preshoot_cap = cap & TX_EQTR_CAP_MASK;
+	}
+
+	if (!hba->host_deemphasis_cap) {
+		ret = ufshcd_dme_get(hba, UIC_ARG_MIB(TX_HS_DEEMPHASIS_SETTING_CAP), &cap);
+		if (ret)
+			return ret;
+
+		hba->host_deemphasis_cap = cap & TX_EQTR_CAP_MASK;
+	}
+
+	if (!hba->device_preshoot_cap) {
+		ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(TX_HS_PRESHOOT_SETTING_CAP), &cap);
+		if (ret)
+			return ret;
+
+		hba->device_preshoot_cap = cap & TX_EQTR_CAP_MASK;
+	}
+
+	if (!hba->device_deemphasis_cap) {
+		ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(TX_HS_DEEMPHASIS_SETTING_CAP), &cap);
+		if (ret)
+			return ret;
+
+		hba->device_deemphasis_cap = cap & TX_EQTR_CAP_MASK;
+	}
+
+	/*
+	 * NOTE(review): a capability that legitimately reads back as 0 is
+	 * re-queried on every call, since 0 doubles as the "not yet cached"
+	 * sentinel in the checks above — confirm this is acceptable.
+	 */
+
+	/*
+	 * Support PreShoot & DeEmphasis of value 0 is mandatory, hence they are
+	 * not reflected in PreShoot/DeEmphasis capabilities. Left shift the
+	 * capability bitmap by 1 and set bit[0] to reflect value 0 is
+	 * supported, such that test_bit() can be used later for convenience.
+	 */
+	h_iter->preshoot_bitmap = (hba->host_preshoot_cap << 0x1) | 0x1;
+	h_iter->deemphasis_bitmap = (hba->host_deemphasis_cap << 0x1) | 0x1;
+	d_iter->preshoot_bitmap = (hba->device_preshoot_cap << 0x1) | 0x1;
+	d_iter->deemphasis_bitmap = (hba->device_deemphasis_cap << 0x1) | 0x1;
+
+	return 0;
+}
+
+/**
+ * adapt_cap_to_t_adapt - Calculate TAdapt from adapt capability
+ * @adapt_cap: Adapt capability
+ *
+ * For NRZ:
+ *   IF (ADAPT_range = FINE)
+ *     TADAPT = 650 x (ADAPT_length + 1)
+ *   ELSE (IF ADAPT_range = COARSE)
+ *     TADAPT = 650 x 2^ADAPT_length
+ *
+ * Returns calculated TAdapt value in term of Unit Intervals (UI)
+ */
+static inline u64 adapt_cap_to_t_adapt(u32 adapt_cap)
+{
+	u64 tadapt;
+	u8 adapt_length = adapt_cap & ADAPT_LENGTH_MASK;
+
+	if (!IS_ADAPT_RANGE_COARSE(adapt_cap))
+		tadapt = TADAPT_FACTOR * (adapt_length + 1); /* FINE range */
+	else
+		tadapt = TADAPT_FACTOR * (1 << adapt_length); /* COARSE range */
+
+	return tadapt;
+}
+
+/**
+ * adapt_cap_to_t_adapt_l0l3 - Calculate TAdapt_L0_L3 from adapt capability
+ * @adapt_cap: Adapt capability
+ *
+ * For PAM-4:
+ *   IF (ADAPT_range = FINE)
+ *     TADAPT_L0_L3 = 2^9 x ADAPT_length
+ *   ELSE IF (ADAPT_range = COARSE)
+ *     TADAPT_L0_L3 = 2^9 x (2^ADAPT_length)
+ *
+ * Returns calculated TAdapt value in term of Unit Intervals (UI)
+ */
+static inline u64 adapt_cap_to_t_adapt_l0l3(u32 adapt_cap)
+{
+	u64 tadapt;
+	u8 adapt_length = adapt_cap & ADAPT_LENGTH_MASK;
+
+	if (!IS_ADAPT_RANGE_COARSE(adapt_cap))
+		tadapt = TADAPT_L0L3_FACTOR * adapt_length; /* FINE range */
+	else
+		tadapt = TADAPT_L0L3_FACTOR * (1 << adapt_length); /* COARSE range */
+
+	return tadapt;
+}
+
+/**
+ * adapt_cap_to_t_adapt_l0l1l2l3 - Calculate TAdapt_L0_L1_L2_L3 from adapt capability
+ * @adapt_cap: Adapt capability
+ *
+ * For PAM-4:
+ *   IF (ADAPT_range_L0_L1_L2_L3 = FINE)
+ *     TADAPT_L0_L1_L2_L3 = 2^15 x (ADAPT_length_L0_L1_L2_L3 + 1)
+ *   ELSE IF (ADAPT_range_L0_L1_L2_L3 = COARSE)
+ *     TADAPT_L0_L1_L2_L3 = 2^15 x 2^ADAPT_length_L0_L1_L2_L3
+ *
+ * Returns calculated TAdapt value in term of Unit Intervals (UI)
+ */
+static inline u64 adapt_cap_to_t_adapt_l0l1l2l3(u32 adapt_cap)
+{
+	u64 tadapt;
+	u8 adapt_length = adapt_cap & ADAPT_LENGTH_MASK;
+
+	if (!IS_ADAPT_RANGE_COARSE(adapt_cap))
+		tadapt = TADAPT_L0L1L2L3_FACTOR * (adapt_length + 1); /* FINE range */
+	else
+		tadapt = TADAPT_L0L1L2L3_FACTOR * (1 << adapt_length); /* COARSE range */
+
+	return tadapt;
+}
+
+/**
+ * ufshcd_setup_tx_eqtr_adapt_length - Setup TX adapt length for EQTR
+ * @hba: per adapter instance
+ * @params: TX EQ parameters data structure
+ * @gear: target gear for EQTR
+ *
+ * This function determines and configures the proper TX adapt length (TAdapt)
+ * for the TX EQTR procedure based on the target gear and RX adapt capabilities
+ * of both host and device.
+ *
+ * Guidelines from MIPI UniPro v3.0 spec - select the minimum Adapt Length for
+ * the Equalization Training procedure based on the following conditions:
+ *
+ * If the target High-Speed Gear n is HS-G4 or HS-G5:
+ *  PA_TxAdaptLength_EQTR[7:0] >= Max (10us, RX_HS_Gn_ADAPT_INITIAL_Capability,
+ *					PA_PeerRxHsGnAdaptInitial)
+ *  PA_TxAdaptLength_EQTR[7:0] shall be shorter than PACP_REQUEST_TIMER (10ms)
+ *  PA_TxAdaptLength_EQTR[15:8] is not relevant for HS-G4 and HS-G5. This field
+ *  is set to 255 (reserved value).
+ *
+ * If the target High-Speed Gear n is HS-G6:
+ *  PA_TxAdaptLength_EQTR >= 10us
+ *  PA_TxAdaptLength_EQTR[7:0] >= Max (RX_HS_G6_ADAPT_INITIAL_Capability,
+ *					PA_PeerRxHsG6AdaptInitialL0L3)
+ *  PA_TxAdaptLength_EQTR[15:8] >= Max (RX_HS_G6_ADAPT_INITIAL_L0_L1_L2_L3_Capability,
+ *					PA_PeerRxHsG6AdaptInitialL0L1L2L3)
+ * PA_TxAdaptLength_EQTR shall be shorter than PACP_REQUEST_TIMER value of 10ms.
+ *
+ * Since adapt capabilities encode both range (fine/coarse) and length values,
+ * direct comparison is not possible. This function converts adapt capabilities
+ * to actual time durations in Unit Intervals (UI) using the Adapt time
+ * calculation formula in M-PHY v6.0 spec (Table 8), then selects the maximum
+ * to ensure both host and device use adequate TX adapt length.
+ *
+ * Returns 0 on success, negative error code otherwise
+ */
+static int ufshcd_setup_tx_eqtr_adapt_length(struct ufs_hba *hba,
+					     struct ufshcd_tx_eq_params *params,
+					     u32 gear)
+{
+	struct ufshcd_tx_eqtr_record *rec = params->eqtr_record;
+	u32 adapt_eqtr;
+	int ret;
+
+	/* Reuse a previously computed adapt length if one was recorded. */
+	if (rec && rec->saved_adapt_eqtr) {
+		adapt_eqtr = rec->saved_adapt_eqtr;
+		goto set_adapt_eqtr;
+	}
+
+	if (gear == UFS_HS_G4 || gear == UFS_HS_G5) {
+		u64 t_adapt, t_adapt_local, t_adapt_peer;
+		u32 adapt_cap_local, adapt_cap_peer, adapt_length;
+
+		ret = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(rx_adapt_initial_cap[gear - 1],
+				     UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
+				     &adapt_cap_local);
+		if (ret)
+			return ret;
+
+		if (adapt_cap_local > ADAPT_LENGTH_MAX) {
+			dev_err(hba->dev, "local RX_HS_G%u_ADAPT_INITIAL_CAP (0x%x) exceeds MAX\n",
+				gear, adapt_cap_local);
+			return -EINVAL;
+		}
+
+		ret = ufshcd_dme_get(hba, UIC_ARG_MIB(pa_peer_rx_adapt_initial[gear - 1]),
+				     &adapt_cap_peer);
+		if (ret)
+			return ret;
+
+		if (adapt_cap_peer > ADAPT_LENGTH_MAX) {
+			/* This check validates the peer capability value */
+			dev_err(hba->dev, "peer RX_HS_G%u_ADAPT_INITIAL_CAP (0x%x) exceeds MAX\n",
+				gear, adapt_cap_peer);
+			return -EINVAL;
+		}
+
+		t_adapt_local = adapt_cap_to_t_adapt(adapt_cap_local);
+		t_adapt_peer = adapt_cap_to_t_adapt(adapt_cap_peer);
+		t_adapt = max(t_adapt_local, t_adapt_peer);
+
+		dev_dbg(hba->dev, "local RX_HS_G%u_ADAPT_INITIAL_CAP = 0x%x\n",
+			gear, adapt_cap_local);
+		dev_dbg(hba->dev, "peer RX_HS_G%u_ADAPT_INITIAL_CAP = 0x%x\n",
+			gear, adapt_cap_peer);
+		dev_dbg(hba->dev, "t_adapt_local = %llu UI, t_adapt_peer = %llu UI\n",
+			t_adapt_local, t_adapt_peer);
+		dev_dbg(hba->dev, "TAdapt %llu UI selected for TX EQTR\n",
+			t_adapt);
+
+		/* Keep the raw capability encoding of whichever side is longer */
+		adapt_length = (t_adapt_local >= t_adapt_peer) ?
+			       adapt_cap_local : adapt_cap_peer;
+
+		if (gear == UFS_HS_G4 && t_adapt < TX_EQTR_HS_G4_MIN_T_ADAPT) {
+			dev_dbg(hba->dev, "TAdapt %llu UI is too short for TX EQTR for HS-G%u, use default Adapt 0x%x\n",
+				t_adapt, gear, TX_EQTR_HS_G4_ADAPT_DEFAULT);
+			adapt_length = TX_EQTR_HS_G4_ADAPT_DEFAULT;
+		} else if (gear == UFS_HS_G5 && t_adapt < TX_EQTR_HS_G5_MIN_T_ADAPT) {
+			dev_dbg(hba->dev, "TAdapt %llu UI is too short for TX EQTR for HS-G%u, use default Adapt 0x%x\n",
+				t_adapt, gear, TX_EQTR_HS_G5_ADAPT_DEFAULT);
+			adapt_length = TX_EQTR_HS_G5_ADAPT_DEFAULT;
+		}
+
+		/* [15:8] is reserved for HS-G4/G5 per the guidelines above */
+		adapt_eqtr = adapt_length |
+			     (TX_EQTR_ADAPT_RESERVED << TX_EQTR_ADAPT_LENGTH_L0L1L2L3_SHIFT);
+	} else if (gear == UFS_HS_G6) {
+		u64 t_adapt, t_adapt_l0l3, t_adapt_l0l3_local, t_adapt_l0l3_peer;
+		u64 t_adapt_l0l1l2l3, t_adapt_l0l1l2l3_local, t_adapt_l0l1l2l3_peer;
+		u32 adapt_l0l3_cap_local, adapt_l0l3_cap_peer, adapt_length_l0l3;
+		u32 adapt_l0l1l2l3_cap_local, adapt_l0l1l2l3_cap_peer, adapt_length_l0l1l2l3;
+
+		ret = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(rx_adapt_initial_cap[gear - 1],
+				     UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
+				     &adapt_l0l3_cap_local);
+		if (ret)
+			return ret;
+
+		if (adapt_l0l3_cap_local > ADAPT_L0L3_LENGTH_MAX) {
+			dev_err(hba->dev, "local RX_HS_G%u_ADAPT_INITIAL_CAP (0x%x) exceeds MAX\n",
+				gear, adapt_l0l3_cap_local);
+			return -EINVAL;
+		}
+
+		ret = ufshcd_dme_get(hba, UIC_ARG_MIB(pa_peer_rx_adapt_initial[gear - 1]),
+				     &adapt_l0l3_cap_peer);
+		if (ret)
+			return ret;
+
+		if (adapt_l0l3_cap_peer > ADAPT_L0L3_LENGTH_MAX) {
+			dev_err(hba->dev, "peer RX_HS_G%u_ADAPT_INITIAL_CAP (0x%x) exceeds MAX\n",
+				gear, adapt_l0l3_cap_peer);
+			return -EINVAL;
+		}
+
+		t_adapt_l0l3_local = adapt_cap_to_t_adapt_l0l3(adapt_l0l3_cap_local);
+		t_adapt_l0l3_peer = adapt_cap_to_t_adapt_l0l3(adapt_l0l3_cap_peer);
+
+		dev_dbg(hba->dev, "local RX_HS_G%u_ADAPT_INITIAL_CAP = 0x%x\n",
+			gear, adapt_l0l3_cap_local);
+		dev_dbg(hba->dev, "peer RX_HS_G%u_ADAPT_INITIAL_CAP = 0x%x\n",
+			gear, adapt_l0l3_cap_peer);
+		dev_dbg(hba->dev, "t_adapt_l0l3_local = %llu UI, t_adapt_l0l3_peer = %llu UI\n",
+			t_adapt_l0l3_local, t_adapt_l0l3_peer);
+
+		ret = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(RX_HS_G6_ADAPT_INITIAL_L0L1L2L3_CAP,
+				     UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
+				     &adapt_l0l1l2l3_cap_local);
+		if (ret)
+			return ret;
+
+		if (adapt_l0l1l2l3_cap_local > ADAPT_L0L1L2L3_LENGTH_MAX) {
+			dev_err(hba->dev, "local RX_HS_G%u_ADAPT_INITIAL_L0L1L2L3_CAP (0x%x) exceeds MAX\n",
+				gear, adapt_l0l1l2l3_cap_local);
+			return -EINVAL;
+		}
+
+		ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PEERRXHSG6ADAPTINITIALL0L1L2L3),
+				     &adapt_l0l1l2l3_cap_peer);
+		if (ret)
+			return ret;
+
+		if (adapt_l0l1l2l3_cap_peer > ADAPT_L0L1L2L3_LENGTH_MAX) {
+			dev_err(hba->dev, "peer RX_HS_G%u_ADAPT_INITIAL_L0L1L2L3_CAP (0x%x) exceeds MAX\n",
+				gear, adapt_l0l1l2l3_cap_peer);
+			return -EINVAL;
+		}
+
+		t_adapt_l0l1l2l3_local = adapt_cap_to_t_adapt_l0l1l2l3(adapt_l0l1l2l3_cap_local);
+		t_adapt_l0l1l2l3_peer = adapt_cap_to_t_adapt_l0l1l2l3(adapt_l0l1l2l3_cap_peer);
+
+		dev_dbg(hba->dev, "local RX_HS_G%u_ADAPT_INITIAL_L0L1L2L3_CAP = 0x%x\n",
+			gear, adapt_l0l1l2l3_cap_local);
+		dev_dbg(hba->dev, "peer RX_HS_G%u_ADAPT_INITIAL_L0L1L2L3_CAP = 0x%x\n",
+			gear, adapt_l0l1l2l3_cap_peer);
+		dev_dbg(hba->dev, "t_adapt_l0l1l2l3_local = %llu UI, t_adapt_l0l1l2l3_peer = %llu UI\n",
+			t_adapt_l0l1l2l3_local, t_adapt_l0l1l2l3_peer);
+
+		t_adapt_l0l1l2l3 = max(t_adapt_l0l1l2l3_local, t_adapt_l0l1l2l3_peer);
+		t_adapt_l0l3 = max(t_adapt_l0l3_local, t_adapt_l0l3_peer);
+		t_adapt = t_adapt_l0l3 + t_adapt_l0l1l2l3;
+
+		dev_dbg(hba->dev, "TAdapt %llu PAM-4 UI selected for TX EQTR\n",
+			t_adapt);
+
+		adapt_length_l0l3 = (t_adapt_l0l3_local >= t_adapt_l0l3_peer) ?
+				    adapt_l0l3_cap_local : adapt_l0l3_cap_peer;
+		adapt_length_l0l1l2l3 = (t_adapt_l0l1l2l3_local >= t_adapt_l0l1l2l3_peer) ?
+					adapt_l0l1l2l3_cap_local : adapt_l0l1l2l3_cap_peer;
+
+		if (t_adapt < TX_EQTR_HS_G6_MIN_T_ADAPT) {
+			dev_dbg(hba->dev, "TAdapt %llu UI is too short for TX EQTR for HS-G%u, use default Adapt 0x%x\n",
+				t_adapt, gear, TX_EQTR_HS_G6_ADAPT_DEFAULT);
+			adapt_length_l0l3 = TX_EQTR_HS_G6_ADAPT_DEFAULT;
+		}
+
+		adapt_eqtr = adapt_length_l0l3 |
+			     (adapt_length_l0l1l2l3 << TX_EQTR_ADAPT_LENGTH_L0L1L2L3_SHIFT);
+	} else {
+		return -EINVAL;
+	}
+
+	/* Cache the computed value so later calls can skip the DME reads. */
+	if (rec)
+		rec->saved_adapt_eqtr = (u16)adapt_eqtr;
+
+set_adapt_eqtr:
+	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXADAPTLENGTH_EQTR), adapt_eqtr);
+	if (ret)
+		dev_err(hba->dev, "Failed to set adapt length for TX EQTR: %d\n", ret);
+	else
+		dev_dbg(hba->dev, "PA_TXADAPTLENGTH_EQTR configured to 0x%08x\n", adapt_eqtr);
+
+	return ret;
+}
+
+/**
+ * ufshcd_compose_tx_eqtr_setting - Compose TX EQTR setting
+ * @iter: TX EQTR iterator data structure
+ * @num_lanes: number of active lanes
+ *
+ * Returns composed TX EQTR setting, same setting is used for all active lanes
+ */
+static inline u32 ufshcd_compose_tx_eqtr_setting(struct tx_eqtr_iter *iter,
+						 int num_lanes)
+{
+	u32 setting = 0;
+	int lane;
+
+	/* OR the per-lane PreShoot/DeEmphasis bitfields into one word */
+	for (lane = 0; lane < num_lanes; lane++) {
+		setting |= TX_HS_PRESHOOT_BITS(lane, iter->preshoot);
+		setting |= TX_HS_DEEMPHASIS_BITS(lane, iter->deemphasis);
+	}
+
+	return setting;
+}
+
+/**
+ * ufshcd_apply_tx_eqtr_settings - Apply TX EQTR setting
+ * @hba: per adapter instance
+ * @pwr_mode: target power mode containing gear and rate information
+ * @h_iter: host TX EQTR iterator data structure
+ * @d_iter: device TX EQTR iterator data structure
+ *
+ * Writes the host setting to PA_TXEQTRSETTING and the device setting to
+ * PA_PEERTXEQTRSETTING, then gives the variant driver a chance to apply
+ * additional settings via vops.
+ *
+ * Returns 0 on success, negative error code otherwise
+ */
+static int ufshcd_apply_tx_eqtr_settings(struct ufs_hba *hba,
+					 struct ufs_pa_layer_attr *pwr_mode,
+					 struct tx_eqtr_iter *h_iter,
+					 struct tx_eqtr_iter *d_iter)
+{
+	u32 setting;
+	int ret;
+
+	/* Host TX lanes */
+	setting = ufshcd_compose_tx_eqtr_setting(h_iter, pwr_mode->lane_tx);
+	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXEQTRSETTING), setting);
+	if (ret)
+		return ret;
+
+	/* Device (peer) TX lanes */
+	setting = ufshcd_compose_tx_eqtr_setting(d_iter, pwr_mode->lane_rx);
+	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PEERTXEQTRSETTING), setting);
+	if (ret)
+		return ret;
+
+	ret = ufshcd_vops_apply_tx_eqtr_settings(hba, pwr_mode, h_iter, d_iter);
+
+	return ret;
+}
+
+/**
+ * ufshcd_update_tx_eq_params - Update TX Equalization params
+ * @params: TX EQ parameters data structure
+ * @pwr_mode: target power mode containing gear and rate
+ * @eqtr_data: TX EQTR data structure
+ *
+ * Update TX Equalization params using results from TX EQTR data. Check also
+ * the TX EQTR FOM value for each TX lane in the TX EQTR data. If a TX lane got
+ * a FOM value of 0, restore the TX Equalization settings from the last known
+ * valid TX Equalization params for that specific TX lane.
+ */
+static inline void
+ufshcd_update_tx_eq_params(struct ufshcd_tx_eq_params *params,
+			   struct ufs_pa_layer_attr *pwr_mode,
+			   struct ufshcd_tx_eqtr_data *eqtr_data)
+{
+	struct ufshcd_tx_eqtr_record *rec = params->eqtr_record;
+
+	/* Fall back to the previous valid params for lanes with FOM == 0 */
+	if (params->is_valid) {
+		int lane;
+
+		for (lane = 0; lane < pwr_mode->lane_tx; lane++)
+			if (eqtr_data->host[lane].fom_val == 0)
+				eqtr_data->host[lane] = params->host[lane];
+
+		for (lane = 0; lane < pwr_mode->lane_rx; lane++)
+			if (eqtr_data->device[lane].fom_val == 0)
+				eqtr_data->device[lane] = params->device[lane];
+	}
+
+	memcpy(params->host, eqtr_data->host, sizeof(params->host));
+	memcpy(params->device, eqtr_data->device, sizeof(params->device));
+
+	/* Optional record keeping: full FOM tables plus timestamp/counter */
+	if (!rec)
+		return;
+
+	memcpy(rec->host_fom, eqtr_data->host_fom, sizeof(rec->host_fom));
+	memcpy(rec->device_fom, eqtr_data->device_fom, sizeof(rec->device_fom));
+	rec->last_record_ts = ktime_get();
+	rec->last_record_index++;
+}
+
+/**
+ * __ufshcd_tx_eqtr - TX Equalization Training (EQTR) procedure
+ * @hba: per adapter instance
+ * @params: TX EQ parameters data structure
+ * @pwr_mode: target power mode containing gear and rate information
+ *
+ * This function implements the complete TX EQTR procedure as defined in UFSHCI
+ * v5.0 specification. It iterates through all possible combinations of PreShoot
+ * and DeEmphasis settings to find the optimal TX Equalization settings for all
+ * active lanes.
+ *
+ * Returns 0 on success, negative error code otherwise
+ */
+static int __ufshcd_tx_eqtr(struct ufs_hba *hba,
+			    struct ufshcd_tx_eq_params *params,
+			    struct ufs_pa_layer_attr *pwr_mode)
+{
+	/* Auto-freed on function exit via the cleanup attribute */
+	struct ufshcd_tx_eqtr_data *eqtr_data  __free(kfree) =
+		kzalloc(sizeof(*eqtr_data), GFP_KERNEL);
+	struct tx_eqtr_iter h_iter = {};
+	struct tx_eqtr_iter d_iter = {};
+	u32 gear = pwr_mode->gear_tx;
+	u8 preshoot, deemphasis;
+	ktime_t start;
+	int ret;
+
+	if (!eqtr_data)
+		return -ENOMEM;
+
+	dev_info(hba->dev, "Start TX EQTR procedure for HS-G%u, Rate-%s, RX Lanes: %u, TX Lanes: %u\n",
+		 gear, ufs_hs_rate_to_str(pwr_mode->hs_rate),
+		 pwr_mode->lane_rx, pwr_mode->lane_tx);
+
+	start = ktime_get();
+
+	/* Step 1 - Determine the TX Adapt Length for EQTR */
+	ret = ufshcd_setup_tx_eqtr_adapt_length(hba, params, gear);
+	if (ret) {
+		dev_err(hba->dev, "Failed to setup TX EQTR Adaptation length: %d\n", ret);
+		return ret;
+	}
+
+	/* Step 2 - Determine TX Equalization setting capabilities */
+	ret = ufshcd_tx_eqtr_iter_init(hba, &h_iter, &d_iter);
+	if (ret) {
+		dev_err(hba->dev, "Failed to init TX EQTR data: %d\n", ret);
+		return ret;
+	}
+
+	/* TX EQTR main loop: sweep every PreShoot x DeEmphasis combination */
+	for (preshoot = 0; preshoot < TX_HS_NUM_PRESHOOT; preshoot++) {
+		for (deemphasis = 0; deemphasis < TX_HS_NUM_DEEMPHASIS; deemphasis++) {
+			/* Skip combinations neither side can use */
+			if (!tx_eqtr_iter_update(preshoot, deemphasis, &h_iter, &d_iter))
+				continue;
+
+			/* Step 3 - Apply TX EQTR settings */
+			ret = ufshcd_apply_tx_eqtr_settings(hba, pwr_mode, &h_iter, &d_iter);
+			if (ret) {
+				dev_err(hba->dev, "Failed to apply TX EQTR settings (PreShoot %u, DeEmphasis %u): %d\n",
+					preshoot, deemphasis, ret);
+				return ret;
+			}
+
+			/* Step 4 - Trigger UIC TX EQTR */
+			ret = ufshcd_uic_tx_eqtr(hba, gear);
+			if (ret) {
+				dev_err(hba->dev, "Failed to trigger UIC TX EQTR for target gear %u: %d\n",
+					gear, ret);
+				return ret;
+			}
+
+			/* Step 5 - Get FOM */
+			ret = ufshcd_get_rx_fom(hba, pwr_mode, &h_iter, &d_iter);
+			if (ret) {
+				dev_err(hba->dev, "Failed to get RX_FOM: %d\n",
+					ret);
+				return ret;
+			}
+
+			ufshcd_evaluate_tx_eqtr_fom(hba, pwr_mode, eqtr_data, &h_iter, &d_iter);
+		}
+	}
+
+	dev_info(hba->dev, "TX EQTR procedure completed! Time elapsed: %llu ms\n",
+		 ktime_to_ms(ktime_sub(ktime_get(), start)));
+
+	ufshcd_update_tx_eq_params(params, pwr_mode, eqtr_data);
+
+	/* ret is 0 here: every failure path above has already returned */
+	return ret;
+}
+
+/**
+ * ufshcd_tx_eqtr_prepare - Prepare UFS link for TX EQTR procedure
+ * @hba: per adapter instance
+ * @pwr_mode: target power mode containing gear and rate
+ *
+ * This function prepares the UFS link for TX Equalization Training (EQTR) by
+ * establishing the proper initial conditions required by the EQTR procedure.
+ * It ensures that EQTR starts from the most reliable Power Mode (HS-G1) with
+ * all connected lanes activated and sets host TX HS Adapt Type to INITIAL.
+ *
+ * Returns 0 on successful preparation, negative error code on failure
+ */
+static int ufshcd_tx_eqtr_prepare(struct ufs_hba *hba,
+				  struct ufs_pa_layer_attr *pwr_mode)
+{
+	struct ufs_pa_layer_attr pwr_mode_hs_g1 = {
+		/* TX EQTR shall be initiated from the most reliable HS-G1 */
+		.gear_rx = UFS_HS_G1,
+		.gear_tx = UFS_HS_G1,
+		.lane_rx = pwr_mode->lane_rx,
+		.lane_tx = pwr_mode->lane_tx,
+		.pwr_rx = FAST_MODE,
+		.pwr_tx = FAST_MODE,
+		/* Use the target power mode's HS rate */
+		.hs_rate = pwr_mode->hs_rate,
+	};
+	u32 rate = pwr_mode->hs_rate;
+	int ret;
+
+	/* Change power mode to HS-G1, activate all connected lanes. */
+	ret = ufshcd_change_power_mode(hba, &pwr_mode_hs_g1,
+				       UFSHCD_PMC_POLICY_DONT_FORCE);
+	if (ret) {
+		dev_err(hba->dev, "TX EQTR: Failed to change power mode to HS-G1, Rate-%s: %d\n",
+			ufs_hs_rate_to_str(rate), ret);
+		return ret;
+	}
+
+	/* INITIAL adapt is required so training starts from a clean state */
+	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE),
+			     PA_INITIAL_ADAPT);
+	if (ret)
+		dev_err(hba->dev, "TX EQTR: Failed to set Host Adapt type to INITIAL: %d\n",
+			ret);
+
+	return ret;
+}
+
+/**
+ * ufshcd_tx_eqtr_unprepare - Restore the power mode after a failed TX EQTR
+ * @hba: per adapter instance
+ * @pwr_mode: power mode to restore (typically the pre-EQTR mode)
+ *
+ * No-op if the saved mode is SLOWAUTO or has no HS rate recorded, since
+ * there is no meaningful HS mode to restore in that case.
+ */
+static void ufshcd_tx_eqtr_unprepare(struct ufs_hba *hba,
+				     struct ufs_pa_layer_attr *pwr_mode)
+{
+	int err;
+
+	if (pwr_mode->pwr_rx == SLOWAUTO_MODE || pwr_mode->hs_rate == 0)
+		return;
+
+	err = ufshcd_change_power_mode(hba, pwr_mode,
+				       UFSHCD_PMC_POLICY_DONT_FORCE);
+	if (err)
+		dev_err(hba->dev, "%s: Failed to restore Power Mode: %d\n",
+			__func__, err);
+}
+
+/**
+ * ufshcd_tx_eqtr - Perform TX EQTR procedures with vops callbacks
+ * @hba: per adapter instance
+ * @params: TX EQ parameters data structure to populate
+ * @pwr_mode: target power mode containing gear and rate information
+ *
+ * This is the main entry point for performing TX Equalization Training (EQTR)
+ * procedure as defined in UFSHCI v5.0 specification. It serves as a wrapper
+ * around __ufshcd_tx_eqtr() to provide vops support through the variant
+ * operations framework.
+ *
+ * Returns 0 on success, negative error code on failure
+ */
+static int ufshcd_tx_eqtr(struct ufs_hba *hba,
+			  struct ufshcd_tx_eq_params *params,
+			  struct ufs_pa_layer_attr *pwr_mode)
+{
+	struct ufs_pa_layer_attr old_pwr_info;
+	int ret;
+
+	/* Lazily allocate the training record; devm ties it to hba lifetime */
+	if (!params->eqtr_record) {
+		params->eqtr_record = devm_kzalloc(hba->dev,
+						   sizeof(*params->eqtr_record),
+						   GFP_KERNEL);
+		if (!params->eqtr_record)
+			return -ENOMEM;
+	}
+
+	/* Snapshot the current power mode so it can be restored on failure */
+	memcpy(&old_pwr_info, &hba->pwr_info, sizeof(struct ufs_pa_layer_attr));
+
+	ret = ufshcd_tx_eqtr_prepare(hba, pwr_mode);
+	if (ret) {
+		dev_err(hba->dev, "Failed to prepare TX EQTR: %d\n", ret);
+		goto out;
+	}
+
+	ret = ufshcd_vops_tx_eqtr_notify(hba, PRE_CHANGE, pwr_mode);
+	if (ret)
+		goto out;
+
+	ret = __ufshcd_tx_eqtr(hba, params, pwr_mode);
+	if (ret)
+		goto out;
+
+	ret = ufshcd_vops_tx_eqtr_notify(hba, POST_CHANGE, pwr_mode);
+
+out:
+	if (ret)
+		ufshcd_tx_eqtr_unprepare(hba, &old_pwr_info);
+
+	return ret;
+}
+
+/**
+ * ufshcd_config_tx_eq_settings - Configure TX Equalization settings
+ * @hba: per adapter instance
+ * @pwr_mode: target power mode containing gear and rate information
+ * @force_tx_eqtr: execute the TX EQTR procedure even if params are valid
+ *
+ * This function finds and sets the TX Equalization settings for the given
+ * target power mode. Training runs only when no valid parameters exist for
+ * the gear or when @force_tx_eqtr is set; the (possibly cached) settings are
+ * then applied once per validity cycle.
+ *
+ * Returns 0 on success, error code otherwise
+ */
+int ufshcd_config_tx_eq_settings(struct ufs_hba *hba,
+				 struct ufs_pa_layer_attr *pwr_mode,
+				 bool force_tx_eqtr)
+{
+	struct ufshcd_tx_eq_params *params;
+	u32 gear, rate;
+
+	if (!ufshcd_is_tx_eq_supported(hba) || !use_adaptive_txeq)
+		return 0;
+
+	if (!hba->max_pwr_info.is_valid) {
+		dev_err(hba->dev, "Max power info is invalid\n");
+		return -EINVAL;
+	}
+
+	if (!pwr_mode) {
+		dev_err(hba->dev, "Target power mode is NULL\n");
+		return -EINVAL;
+	}
+
+	gear = pwr_mode->gear_tx;
+	rate = pwr_mode->hs_rate;
+
+	if (gear < UFS_HS_G1 || gear > UFS_HS_GEAR_MAX) {
+		dev_err(hba->dev, "Invalid HS-Gear (%u) for TX Equalization\n",
+			gear);
+		return -EINVAL;
+	} else if (gear < max_t(u32, adaptive_txeq_gear, UFS_HS_G4)) {
+		/* TX EQTR is supported for HS-G4 and higher Gears */
+		return 0;
+	}
+
+	if (rate != PA_HS_MODE_A && rate != PA_HS_MODE_B) {
+		dev_err(hba->dev, "Invalid HS-Rate (%u) for TX Equalization\n",
+			rate);
+		return -EINVAL;
+	}
+
+	/* Per-gear parameters: 1-based gear maps to 0-based array index */
+	params = &hba->tx_eq_params[gear - 1];
+	if (!params->is_valid || force_tx_eqtr) {
+		int ret;
+
+		ret = ufshcd_tx_eqtr(hba, params, pwr_mode);
+		if (ret) {
+			dev_err(hba->dev, "Failed to train TX Equalization for HS-G%u, Rate-%s: %d\n",
+				gear, ufs_hs_rate_to_str(rate), ret);
+			return ret;
+		}
+
+		/* Mark TX Equalization settings as valid */
+		params->is_valid = true;
+		params->is_applied = false;
+	}
+
+	if (params->is_valid && !params->is_applied) {
+		int ret;
+
+		ret = ufshcd_apply_tx_eq_settings(hba, params, gear);
+		if (ret) {
+			dev_err(hba->dev, "Failed to apply TX Equalization settings for HS-G%u, Rate-%s: %d\n",
+				gear, ufs_hs_rate_to_str(rate), ret);
+			return ret;
+		}
+
+		params->is_applied = true;
+	}
+
+	return 0;
+}
+
+/**
+ * ufshcd_apply_valid_tx_eq_settings - Apply valid TX Equalization settings
+ * @hba: per-adapter instance
+ *
+ * This function iterates through all supported High-Speed (HS) gears and
+ * applies valid TX Equalization settings to both Host and Device. A failed
+ * apply clears is_applied for that gear but does not stop the iteration.
+ */
+void ufshcd_apply_valid_tx_eq_settings(struct ufs_hba *hba)
+{
+	struct ufshcd_tx_eq_params *params;
+	int gear, err;
+
+	if (!ufshcd_is_tx_eq_supported(hba))
+		return;
+
+	if (!hba->max_pwr_info.is_valid) {
+		dev_err(hba->dev, "Max power info is invalid, cannot apply TX Equalization settings\n");
+		return;
+	}
+
+	for (gear = UFS_HS_G1; gear <= UFS_HS_GEAR_MAX; gear++) {
+		params = &hba->tx_eq_params[gear - 1];
+
+		if (params->is_valid) {
+			err = ufshcd_apply_tx_eq_settings(hba, params, gear);
+			if (err) {
+				/* Keep going; only this gear stays unapplied */
+				params->is_applied = false;
+				dev_err(hba->dev, "Failed to apply TX Equalization settings for HS-G%u: %d\n",
+					gear, err);
+			} else {
+				params->is_applied = true;
+			}
+		}
+	}
+}
+
+/**
+ * ufshcd_retrain_tx_eq - Retrain TX Equalization and apply new settings
+ * @hba: per-adapter instance
+ * @gear: target High-Speed (HS) gear for retraining
+ *
+ * This function initiates a refresh of the TX Equalization settings for a
+ * specific HS gear. It scales the clocks to maximum frequency, negotiates the
+ * power mode with the device, retrains TX EQ and applies new TX EQ settings
+ * by conducting a Power Mode change.
+ *
+ * Returns 0 on success, non-zero error code otherwise
+ */
+int ufshcd_retrain_tx_eq(struct ufs_hba *hba, u32 gear)
+{
+	struct ufs_pa_layer_attr new_pwr_info, final_params = {};
+	int ret;
+
+	if (!ufshcd_is_tx_eq_supported(hba) || !use_adaptive_txeq)
+		return -EOPNOTSUPP;
+
+	if (gear < adaptive_txeq_gear)
+		return -ERANGE;
+
+	ufshcd_hold(hba);
+
+	/* Quiesce command processing; give outstanding commands up to 1s */
+	ret = ufshcd_pause_command_processing(hba, 1 * USEC_PER_SEC);
+	if (ret) {
+		ufshcd_release(hba);
+		return ret;
+	}
+
+	/* scale up clocks to max frequency before TX EQTR */
+	if (ufshcd_is_clkscaling_supported(hba))
+		ufshcd_scale_clks(hba, ULONG_MAX, true);
+
+	new_pwr_info = hba->pwr_info;
+	new_pwr_info.gear_tx = gear;
+	new_pwr_info.gear_rx = gear;
+
+	/* If the negotiation vop is absent or fails, use the requested mode */
+	ret = ufshcd_vops_negotiate_pwr_mode(hba, &new_pwr_info, &final_params);
+	if (ret)
+		memcpy(&final_params, &new_pwr_info, sizeof(final_params));
+
+	if (final_params.gear_tx != gear) {
+		dev_err(hba->dev, "Negotiated Gear (%u) does not match target Gear (%u)\n",
+			final_params.gear_tx, gear);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = ufshcd_config_tx_eq_settings(hba, &final_params, true);
+	if (ret) {
+		dev_err(hba->dev, "Failed to config TX Equalization for HS-G%u, Rate-%s: %d\n",
+			final_params.gear_tx,
+			ufs_hs_rate_to_str(final_params.hs_rate), ret);
+		goto out;
+	}
+
+	/* Change Power Mode to apply the new TX EQ settings */
+	ret = ufshcd_change_power_mode(hba, &final_params,
+				       UFSHCD_PMC_POLICY_FORCE);
+	if (ret)
+		dev_err(hba->dev, "%s: Failed to change Power Mode to HS-G%u, Rate-%s: %d\n",
+			__func__, final_params.gear_tx,
+			ufs_hs_rate_to_str(final_params.hs_rate), ret);
+
+out:
+	ufshcd_resume_command_processing(hba);
+	ufshcd_release(hba);
+
+	return ret;
+}
diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
index 37c32071e754..0a72148cb053 100644
--- a/drivers/ufs/core/ufshcd-priv.h
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -76,8 +76,13 @@ void ufshcd_mcq_compl_all_cqes_lock(struct ufs_hba *hba,
 bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd);
 int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag);
 int ufshcd_mcq_abort(struct scsi_cmnd *cmd);
+u32 ufshcd_mcq_read_mcqiacr(struct ufs_hba *hba, int i);
+void ufshcd_mcq_write_mcqiacr(struct ufs_hba *hba, u32 val, int i);
 int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
 void ufshcd_release_scsi_cmd(struct ufs_hba *hba, struct scsi_cmnd *cmd);
+int ufshcd_pause_command_processing(struct ufs_hba *hba, u64 timeout_us);
+void ufshcd_resume_command_processing(struct ufs_hba *hba);
+int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq, bool scale_up);
 
 /**
  * enum ufs_descr_fmt - UFS string descriptor format
@@ -103,6 +108,16 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
 int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable);
 int ufshcd_read_device_lvl_exception_id(struct ufs_hba *hba, u64 *exception_id);
 
+int ufshcd_uic_tx_eqtr(struct ufs_hba *hba, int gear);
+void ufshcd_apply_valid_tx_eq_settings(struct ufs_hba *hba);
+int ufshcd_config_tx_eq_settings(struct ufs_hba *hba,
+				 struct ufs_pa_layer_attr *pwr_mode,
+				 bool force_tx_eqtr);
+void ufshcd_print_tx_eq_params(struct ufs_hba *hba);
+bool ufshcd_is_txeq_presets_used(struct ufs_hba *hba);
+bool ufshcd_is_txeq_preset_selected(u8 preshoot, u8 deemphasis);
+int ufshcd_retrain_tx_eq(struct ufs_hba *hba, u32 gear);
+
 /* Wrapper functions for safely calling variant operations */
 static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
 {
@@ -167,14 +182,24 @@ static inline int ufshcd_vops_link_startup_notify(struct ufs_hba *hba,
 	return 0;
 }
 
+static inline int ufshcd_vops_negotiate_pwr_mode(struct ufs_hba *hba,
+						 const struct ufs_pa_layer_attr *dev_max_params,
+						 struct ufs_pa_layer_attr *dev_req_params)
+{
+	if (hba->vops && hba->vops->negotiate_pwr_mode)
+		return hba->vops->negotiate_pwr_mode(hba, dev_max_params,
+					dev_req_params);
+
+	return -ENOTSUPP;
+}
+
 static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba,
 				enum ufs_notify_change_status status,
-				const struct ufs_pa_layer_attr *dev_max_params,
 				struct ufs_pa_layer_attr *dev_req_params)
 {
 	if (hba->vops && hba->vops->pwr_change_notify)
 		return hba->vops->pwr_change_notify(hba, status,
-					dev_max_params, dev_req_params);
+					dev_req_params);
 
 	return -ENOTSUPP;
 }
@@ -287,6 +312,38 @@ static inline u32 ufshcd_vops_freq_to_gear_speed(struct ufs_hba *hba, unsigned l
 	return 0;
 }
 
+static inline int ufshcd_vops_get_rx_fom(struct ufs_hba *hba,
+					 struct ufs_pa_layer_attr *pwr_mode,
+					 struct tx_eqtr_iter *h_iter,
+					 struct tx_eqtr_iter *d_iter)
+{
+	if (hba->vops && hba->vops->get_rx_fom)
+		return hba->vops->get_rx_fom(hba, pwr_mode, h_iter, d_iter);
+
+	return 0;
+}
+
+static inline int ufshcd_vops_apply_tx_eqtr_settings(struct ufs_hba *hba,
+						     struct ufs_pa_layer_attr *pwr_mode,
+						     struct tx_eqtr_iter *h_iter,
+						     struct tx_eqtr_iter *d_iter)
+{
+	if (hba->vops && hba->vops->apply_tx_eqtr_settings)
+		return hba->vops->apply_tx_eqtr_settings(hba, pwr_mode, h_iter, d_iter);
+
+	return 0;
+}
+
+static inline int ufshcd_vops_tx_eqtr_notify(struct ufs_hba *hba,
+					     enum ufs_notify_change_status status,
+					     struct ufs_pa_layer_attr *pwr_mode)
+{
+	if (hba->vops && hba->vops->tx_eqtr_notify)
+		return hba->vops->tx_eqtr_notify(hba, status, pwr_mode);
+
+	return 0;
+}
+
 extern const struct ufs_pm_lvl_states ufs_pm_lvl_states[];
 
 /**
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 847b55789bb8..cb81aa94d5c4 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -333,11 +333,7 @@ static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
 static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
-static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
-			     bool scale_up);
 static irqreturn_t ufshcd_intr(int irq, void *__hba);
-static int ufshcd_change_power_mode(struct ufs_hba *hba,
-			     struct ufs_pa_layer_attr *pwr_mode);
 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
@@ -1211,8 +1207,7 @@ static int ufshcd_opp_set_rate(struct ufs_hba *hba, unsigned long freq)
  *
  * Return: 0 if successful; < 0 upon failure.
  */
-static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
-			     bool scale_up)
+int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq, bool scale_up)
 {
 	int ret = 0;
 	ktime_t start = ktime_get();
@@ -1365,6 +1360,48 @@ static int ufshcd_wait_for_pending_cmds(struct ufs_hba *hba,
 	return ret;
 }
 
+/**
+ * ufshcd_pause_command_processing - Pause command processing
+ * @hba: per-adapter instance
+ * @timeout_us: timeout in microseconds to wait for pending commands to finish
+ *
+ * This function stops new command submissions and waits for existing commands
+ * to complete.
+ *
+ * Return: 0 on success, %-EBUSY if commands did not finish within @timeout_us.
+ * On failure, all acquired locks are released and the tagset is unquiesced.
+ */
+int ufshcd_pause_command_processing(struct ufs_hba *hba, u64 timeout_us)
+{
+	int ret = 0;
+
+	mutex_lock(&hba->host->scan_mutex);
+	blk_mq_quiesce_tagset(&hba->host->tag_set);
+	down_write(&hba->clk_scaling_lock);
+
+	if (ufshcd_wait_for_pending_cmds(hba, timeout_us)) {
+		ret = -EBUSY;
+		up_write(&hba->clk_scaling_lock);
+		blk_mq_unquiesce_tagset(&hba->host->tag_set);
+		mutex_unlock(&hba->host->scan_mutex);
+	}
+
+	return ret;
+}
+
+/**
+ * ufshcd_resume_command_processing - Resume command processing
+ * @hba: per-adapter instance
+ *
+ * This function resumes command submissions.
+ */
+void ufshcd_resume_command_processing(struct ufs_hba *hba)
+{
+	up_write(&hba->clk_scaling_lock);
+	blk_mq_unquiesce_tagset(&hba->host->tag_set);
+	mutex_unlock(&hba->host->scan_mutex);
+}
+
 /**
  * ufshcd_scale_gear - scale up/down UFS gear
  * @hba: per adapter instance
@@ -1410,7 +1447,8 @@ static int ufshcd_scale_gear(struct ufs_hba *hba, u32 target_gear, bool scale_up
 
 config_pwr_mode:
 	/* check if the power mode needs to be changed or not? */
-	ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
+	ret = ufshcd_config_pwr_mode(hba, &new_pwr_info,
+				     UFSHCD_PMC_POLICY_DONT_FORCE);
 	if (ret)
 		dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
 			__func__, ret,
@@ -4251,7 +4289,8 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
 			pwr_mode_change = true;
 		}
 		if (pwr_mode_change) {
-			ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
+			ret = ufshcd_change_power_mode(hba, &temp_pwr_info,
+						       UFSHCD_PMC_POLICY_DONT_FORCE);
 			if (ret)
 				goto out;
 		}
@@ -4275,7 +4314,8 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
 
 	if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
 	    && pwr_mode_change)
-		ufshcd_change_power_mode(hba, &orig_pwr_info);
+		ufshcd_change_power_mode(hba, &orig_pwr_info,
+					 UFSHCD_PMC_POLICY_DONT_FORCE);
 out:
 	return ret;
 }
@@ -4342,16 +4382,18 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
 	ret = __ufshcd_send_uic_cmd(hba, cmd);
 	if (ret) {
 		dev_err(hba->dev,
-			"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
-			cmd->command, cmd->argument3, ret);
+			"pwr ctrl cmd 0x%x with (MIBattribute 0x%x, mode 0x%x) uic error %d\n",
+			cmd->command, UIC_GET_ATTR_ID(cmd->argument1),
+			cmd->argument3, ret);
 		goto out;
 	}
 
 	if (!wait_for_completion_timeout(hba->uic_async_done,
 					 msecs_to_jiffies(uic_cmd_timeout))) {
 		dev_err(hba->dev,
-			"pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
-			cmd->command, cmd->argument3);
+			"pwr ctrl cmd 0x%x with (MIBattribute 0x%x, mode 0x%x) completion timeout\n",
+			cmd->command, UIC_GET_ATTR_ID(cmd->argument1),
+			cmd->argument3);
 
 		if (!cmd->cmd_active) {
 			dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n",
@@ -4367,14 +4409,16 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
 	status = ufshcd_get_upmcrs(hba);
 	if (status != PWR_LOCAL) {
 		dev_err(hba->dev,
-			"pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
-			cmd->command, status);
+			"pwr ctrl cmd 0x%x with (MIBattribute 0x%x, mode 0x%x) failed, host upmcrs:0x%x\n",
+			cmd->command, UIC_GET_ATTR_ID(cmd->argument1),
+			cmd->argument3, status);
 		ret = (status != PWR_OK) ? status : -1;
 	}
 out:
 	if (ret) {
 		ufshcd_print_host_state(hba);
 		ufshcd_print_pwr_info(hba);
+		ufshcd_print_tx_eq_params(hba);
 		ufshcd_print_evt_hist(hba);
 	}
 
@@ -4400,6 +4444,29 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
 	return ret;
 }
 
+/**
+ * ufshcd_uic_tx_eqtr - Perform UIC TX Equalization Training
+ * @hba: per adapter instance
+ * @gear: target gear for EQTR
+ *
+ * Return: 0 on success, negative error code otherwise
+ */
+int ufshcd_uic_tx_eqtr(struct ufs_hba *hba, int gear)
+{
+	struct uic_command uic_cmd = {
+		.command = UIC_CMD_DME_SET,
+		.argument1 = UIC_ARG_MIB(PA_EQTR_GEAR),
+		.argument3 = gear,
+	};
+	int ret;
+
+	ufshcd_hold(hba);
+	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+	ufshcd_release(hba);
+
+	return ret;
+}
+
 /**
  * ufshcd_send_bsg_uic_cmd - Send UIC commands requested via BSG layer and retrieve the result
  * @hba: per adapter instance
@@ -4663,13 +4730,33 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
 	return 0;
 }
 
-static int ufshcd_change_power_mode(struct ufs_hba *hba,
-			     struct ufs_pa_layer_attr *pwr_mode)
+/**
+ * ufshcd_dme_change_power_mode() - UniPro DME Power Mode change sequence
+ * @hba: per-adapter instance
+ * @pwr_mode: pointer to the target power mode (gear/lane) attributes
+ * @pmc_policy: Power Mode change policy
+ *
+ * This function handles the low-level DME (Device Management Entity)
+ * configuration required to transition the UFS link to a new power mode. It
+ * performs the following steps:
+ * 1. Checks if the requested mode matches the current state.
+ * 2. Sets M-PHY and UniPro attributes including Gear (PA_RXGEAR/TXGEAR),
+ *    Lanes, Termination, and HS Series (PA_HSSERIES).
+ * 3. Configures default UniPro timeout values (DL_FC0, etc.) unless
+ *    explicitly skipped via quirks.
+ * 4. Triggers the actual hardware mode change via ufshcd_uic_change_pwr_mode().
+ * 5. Updates the HBA's cached power information on success.
+ *
+ * Return: 0 on success, non-zero error code on failure.
+ */
+static int ufshcd_dme_change_power_mode(struct ufs_hba *hba,
+					struct ufs_pa_layer_attr *pwr_mode,
+					enum ufshcd_pmc_policy pmc_policy)
 {
 	int ret;
 
 	/* if already configured to the requested pwr_mode */
-	if (!hba->force_pmc &&
+	if (pmc_policy == UFSHCD_PMC_POLICY_DONT_FORCE &&
 	    pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
 	    pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
 	    pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
@@ -4748,32 +4835,68 @@ static int ufshcd_change_power_mode(struct ufs_hba *hba,
 	return ret;
 }
 
+/**
+ * ufshcd_change_power_mode() - Change UFS Link Power Mode
+ * @hba: per-adapter instance
+ * @pwr_mode: pointer to the target power mode (gear/lane) attributes
+ * @pmc_policy: Power Mode change policy
+ *
+ * This function handles the high-level sequence for changing the UFS link
+ * power mode. It triggers vendor-specific pre-change notification,
+ * executes the DME (Device Management Entity) power mode change sequence,
+ * and, upon success, triggers vendor-specific post-change notification.
+ *
+ * Return: 0 on success, non-zero error code on failure.
+ */
+int ufshcd_change_power_mode(struct ufs_hba *hba,
+			     struct ufs_pa_layer_attr *pwr_mode,
+			     enum ufshcd_pmc_policy pmc_policy)
+{
+	int ret;
+
+	ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE, pwr_mode);
+
+	ret = ufshcd_dme_change_power_mode(hba, pwr_mode, pmc_policy);
+
+	if (!ret)
+		ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, pwr_mode);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ufshcd_change_power_mode);
+
 /**
  * ufshcd_config_pwr_mode - configure a new power mode
  * @hba: per-adapter instance
  * @desired_pwr_mode: desired power configuration
+ * @pmc_policy: Power Mode change policy
  *
  * Return: 0 upon success; < 0 upon failure.
  */
 int ufshcd_config_pwr_mode(struct ufs_hba *hba,
-		struct ufs_pa_layer_attr *desired_pwr_mode)
+			   struct ufs_pa_layer_attr *desired_pwr_mode,
+			   enum ufshcd_pmc_policy pmc_policy)
 {
 	struct ufs_pa_layer_attr final_params = { 0 };
 	int ret;
 
-	ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
-					desired_pwr_mode, &final_params);
+	ret = ufshcd_vops_negotiate_pwr_mode(hba, desired_pwr_mode,
+					     &final_params);
+	if (ret) {
+		if (ret != -ENOTSUPP)
+			dev_err(hba->dev, "Failed to negotiate power mode: %d, use desired as is\n",
+				ret);
 
-	if (ret)
 		memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
+	}
 
-	ret = ufshcd_change_power_mode(hba, &final_params);
-
-	if (!ret)
-		ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
-					&final_params);
+	ret = ufshcd_config_tx_eq_settings(hba, &final_params, false);
+	if (ret)
+		dev_warn(hba->dev, "Failed to configure TX Equalization for HS-G%u, Rate-%s: %d\n",
+			 final_params.gear_tx,
+			 ufs_hs_rate_to_str(final_params.hs_rate), ret);
 
-	return ret;
+	return ufshcd_change_power_mode(hba, &final_params, pmc_policy);
 }
 EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
 
@@ -5568,8 +5691,11 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
 
 	guard(spinlock_irqsave)(hba->host->host_lock);
 	cmd = hba->active_uic_cmd;
-	if (!cmd)
+	if (!cmd) {
+		dev_err(hba->dev,
+			"No active UIC command. Maybe a timeout occurred?\n");
 		return retval;
+	}
 
 	if (ufshcd_is_auto_hibern8_error(hba, intr_status))
 		hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
@@ -6772,6 +6898,7 @@ static void ufshcd_err_handler(struct work_struct *work)
 		spin_unlock_irqrestore(hba->host->host_lock, flags);
 		ufshcd_print_host_state(hba);
 		ufshcd_print_pwr_info(hba);
+		ufshcd_print_tx_eq_params(hba);
 		ufshcd_print_evt_hist(hba);
 		ufshcd_print_tmrs(hba, hba->outstanding_tasks);
 		ufshcd_print_trs_all(hba, pr_prdt);
@@ -6830,14 +6957,13 @@ static void ufshcd_err_handler(struct work_struct *work)
 		 * are sent via bsg and/or sysfs.
 		 */
 		down_write(&hba->clk_scaling_lock);
-		hba->force_pmc = true;
-		pmc_err = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
+		pmc_err = ufshcd_config_pwr_mode(hba, &hba->pwr_info,
+						 UFSHCD_PMC_POLICY_FORCE);
 		if (pmc_err) {
 			needs_reset = true;
 			dev_err(hba->dev, "%s: Failed to restore power mode, err = %d\n",
 					__func__, pmc_err);
 		}
-		hba->force_pmc = false;
 		ufshcd_print_pwr_info(hba);
 		up_write(&hba->clk_scaling_lock);
 		spin_lock_irqsave(hba->host->host_lock, flags);
@@ -6963,10 +7089,19 @@ static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
 	}
 
 	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
-	if ((reg & UIC_DME_ERROR) &&
-	    (reg & UIC_DME_ERROR_CODE_MASK)) {
+	if (reg & UIC_DME_ERROR) {
 		ufshcd_update_evt_hist(hba, UFS_EVT_DME_ERR, reg);
-		hba->uic_error |= UFSHCD_UIC_DME_ERROR;
+
+		if (reg & UIC_DME_ERROR_CODE_MASK)
+			hba->uic_error |= UFSHCD_UIC_DME_ERROR;
+
+		if (reg & UIC_DME_QOS_MASK) {
+			atomic_set(&hba->dme_qos_notification,
+				   reg & UIC_DME_QOS_MASK);
+			if (hba->dme_qos_sysfs_handle)
+				sysfs_notify_dirent(hba->dme_qos_sysfs_handle);
+		}
+
 		retval |= IRQ_HANDLED;
 	}
 
@@ -7036,6 +7171,7 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
 			ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE,
 					 "host_regs: ");
 			ufshcd_print_pwr_info(hba);
+			ufshcd_print_tx_eq_params(hba);
 		}
 		ufshcd_schedule_eh_work(hba);
 		retval |= IRQ_HANDLED;
@@ -7084,16 +7220,17 @@ static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
 /**
  * ufshcd_handle_mcq_cq_events - handle MCQ completion queue events
  * @hba: per adapter instance
+ * @reset_iag: if true, reset the MCQ IAG counter and timer of the CQ
  *
  * Return: IRQ_HANDLED if interrupt is handled.
  */
-static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba)
+static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba, bool reset_iag)
 {
 	struct ufs_hw_queue *hwq;
 	unsigned long outstanding_cqs;
 	unsigned int nr_queues;
 	int i, ret;
-	u32 events;
+	u32 events, reg;
 
 	ret = ufshcd_vops_get_outstanding_cqs(hba, &outstanding_cqs);
 	if (ret)
@@ -7108,6 +7245,12 @@ static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba)
 		if (events)
 			ufshcd_mcq_write_cqis(hba, events, i);
 
+		if (reset_iag) {
+			reg = ufshcd_mcq_read_mcqiacr(hba, i);
+			reg |= INT_AGGR_COUNTER_AND_TIMER_RESET;
+			ufshcd_mcq_write_mcqiacr(hba, reg, i);
+		}
+
 		if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
 			ufshcd_mcq_poll_cqe_lock(hba, hwq);
 	}
@@ -7141,7 +7284,10 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
 		retval |= ufshcd_transfer_req_compl(hba);
 
 	if (intr_status & MCQ_CQ_EVENT_STATUS)
-		retval |= ufshcd_handle_mcq_cq_events(hba);
+		retval |= ufshcd_handle_mcq_cq_events(hba, false);
+
+	if (intr_status & MCQ_IAG_EVENT_STATUS)
+		retval |= ufshcd_handle_mcq_cq_events(hba, true);
 
 	return retval;
 }
@@ -7209,8 +7355,12 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
 	struct ufs_hba *hba = __hba;
 	u32 intr_status, enabled_intr_status;
 
-	/* Move interrupt handling to thread when MCQ & ESI are not enabled */
-	if (!hba->mcq_enabled || !hba->mcq_esi_enabled)
+	/*
+	 * Handle interrupt in thread if MCQ or ESI is disabled,
+	 * and no active UIC command.
+	 */
+	if ((!hba->mcq_enabled || !hba->mcq_esi_enabled) &&
+	    !hba->active_uic_cmd)
 		return IRQ_WAKE_THREAD;
 
 	intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
@@ -7817,6 +7967,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
 		ufshcd_print_evt_hist(hba);
 		ufshcd_print_host_state(hba);
 		ufshcd_print_pwr_info(hba);
+		ufshcd_print_tx_eq_params(hba);
 		ufshcd_print_tr(hba, cmd, true);
 	} else {
 		ufshcd_print_tr(hba, cmd, false);
@@ -8794,6 +8945,8 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
 
 	if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_HIBER8TIME)
 		ufshcd_quirk_override_pa_h8time(hba);
+
+	ufshcd_apply_valid_tx_eq_settings(hba);
 }
 
 static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
@@ -9098,6 +9251,12 @@ static int ufshcd_post_device_init(struct ufs_hba *hba)
 
 	/* UFS device is also active now */
 	ufshcd_set_ufs_dev_active(hba);
+
+	/* Indicate that DME QoS Monitor has been reset */
+	atomic_set(&hba->dme_qos_notification, 0x1);
+	if (hba->dme_qos_sysfs_handle)
+		sysfs_notify_dirent(hba->dme_qos_sysfs_handle);
+
 	ufshcd_force_reset_auto_bkops(hba);
 
 	ufshcd_set_timestamp_attr(hba);
@@ -9112,7 +9271,8 @@ static int ufshcd_post_device_init(struct ufs_hba *hba)
 	if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
 		ufshcd_set_dev_ref_clk(hba);
 	/* Gear up to HS gear. */
-	ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
+	ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info,
+				     UFSHCD_PMC_POLICY_DONT_FORCE);
 	if (ret) {
 		dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
 			__func__, ret);
@@ -9730,6 +9890,7 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
 		hba->is_powered = false;
 		ufs_put_device_desc(hba);
 	}
+	sysfs_put(hba->dme_qos_sysfs_handle);
 }
 
 static int ufshcd_execute_start_stop(struct scsi_device *sdev,
@@ -9929,11 +10090,13 @@ static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
 #ifdef CONFIG_PM
 static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
 {
+	bool vcc_on = false;
 	int ret = 0;
 
 	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
 	    !hba->dev_info.is_lu_power_on_wp) {
 		ret = ufshcd_setup_vreg(hba, true);
+		vcc_on = true;
 	} else if (!ufshcd_is_ufs_dev_active(hba)) {
 		if (!ufshcd_is_link_active(hba)) {
 			ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
@@ -9944,6 +10107,7 @@ static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
 				goto vccq_lpm;
 		}
 		ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
+		vcc_on = true;
 	}
 	goto out;
 
@@ -9952,6 +10116,15 @@ static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
 vcc_disable:
 	ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
 out:
+	/*
+	 * On platforms with a slow VCC ramp-up, a delay is needed after
+	 * turning on VCC to ensure the voltage is stable before the
+	 * reference clock is enabled.
+	 */
+	if (hba->quirks & UFSHCD_QUIRK_VCC_ON_DELAY && !ret && vcc_on &&
+	    hba->vreg_info.vcc && !hba->vreg_info.vcc->always_on)
+		usleep_range(1000, 1100);
+
 	return ret;
 }
 #endif /* CONFIG_PM */
@@ -11049,6 +11222,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 		goto out_disable;
 
 	ufs_sysfs_add_nodes(hba->dev);
+	hba->dme_qos_sysfs_handle = sysfs_get_dirent(hba->dev->kobj.sd,
+						     "dme_qos_notification");
 	async_schedule(ufshcd_async_scan, hba);
 
 	device_enable_async_suspend(dev);
diff --git a/drivers/ufs/host/ufs-amd-versal2.c b/drivers/ufs/host/ufs-amd-versal2.c
index 6c454ae8a9c8..2154d6286817 100644
--- a/drivers/ufs/host/ufs-amd-versal2.c
+++ b/drivers/ufs/host/ufs-amd-versal2.c
@@ -443,7 +443,6 @@ static int ufs_versal2_phy_ratesel(struct ufs_hba *hba, u32 activelanes, u32 rx_
 }
 
 static int ufs_versal2_pwr_change_notify(struct ufs_hba *hba, enum ufs_notify_change_status status,
-					 const struct ufs_pa_layer_attr *dev_max_params,
 					 struct ufs_pa_layer_attr *dev_req_params)
 {
 	struct ufs_versal2_host *host = ufshcd_get_variant(hba);
@@ -451,8 +450,6 @@ static int ufs_versal2_pwr_change_notify(struct ufs_hba *hba, enum ufs_notify_ch
 	int ret = 0;
 
 	if (status == PRE_CHANGE) {
-		memcpy(dev_req_params, dev_max_params, sizeof(struct ufs_pa_layer_attr));
-
 		/* If it is not a calibrated part, switch PWRMODE to SLOW_MODE */
 		if (!host->attcompval0 && !host->attcompval1 && !host->ctlecompval0 &&
 		    !host->ctlecompval1) {
diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c
index 76fee3a79c77..77a6c8e44485 100644
--- a/drivers/ufs/host/ufs-exynos.c
+++ b/drivers/ufs/host/ufs-exynos.c
@@ -818,12 +818,10 @@ static u32 exynos_ufs_get_hs_gear(struct ufs_hba *hba)
 }
 
 static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba,
-				const struct ufs_pa_layer_attr *dev_max_params,
 				struct ufs_pa_layer_attr *dev_req_params)
 {
 	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
 	struct phy *generic_phy = ufs->phy;
-	struct ufs_host_params host_params;
 	int ret;
 
 	if (!dev_req_params) {
@@ -832,18 +830,6 @@ static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba,
 		goto out;
 	}
 
-	ufshcd_init_host_params(&host_params);
-
-	/* This driver only support symmetric gear setting e.g. hs_tx_gear == hs_rx_gear */
-	host_params.hs_tx_gear = exynos_ufs_get_hs_gear(hba);
-	host_params.hs_rx_gear = exynos_ufs_get_hs_gear(hba);
-
-	ret = ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params);
-	if (ret) {
-		pr_err("%s: failed to determine capabilities\n", __func__);
-		goto out;
-	}
-
 	if (ufs->drv_data->pre_pwr_change)
 		ufs->drv_data->pre_pwr_change(ufs, dev_req_params);
 
@@ -1677,17 +1663,30 @@ static int exynos_ufs_link_startup_notify(struct ufs_hba *hba,
 	return ret;
 }
 
+static int exynos_ufs_negotiate_pwr_mode(struct ufs_hba *hba,
+					 const struct ufs_pa_layer_attr *dev_max_params,
+					 struct ufs_pa_layer_attr *dev_req_params)
+{
+	struct ufs_host_params host_params;
+
+	ufshcd_init_host_params(&host_params);
+
+	/* This driver only supports symmetric gear settings, e.g. hs_tx_gear == hs_rx_gear */
+	host_params.hs_tx_gear = exynos_ufs_get_hs_gear(hba);
+	host_params.hs_rx_gear = exynos_ufs_get_hs_gear(hba);
+
+	return ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params);
+}
+
 static int exynos_ufs_pwr_change_notify(struct ufs_hba *hba,
 				enum ufs_notify_change_status status,
-				const struct ufs_pa_layer_attr *dev_max_params,
 				struct ufs_pa_layer_attr *dev_req_params)
 {
 	int ret = 0;
 
 	switch (status) {
 	case PRE_CHANGE:
-		ret = exynos_ufs_pre_pwr_mode(hba, dev_max_params,
-					      dev_req_params);
+		ret = exynos_ufs_pre_pwr_mode(hba, dev_req_params);
 		break;
 	case POST_CHANGE:
 		ret = exynos_ufs_post_pwr_mode(hba, dev_req_params);
@@ -2015,6 +2014,7 @@ static const struct ufs_hba_variant_ops ufs_hba_exynos_ops = {
 	.exit				= exynos_ufs_exit,
 	.hce_enable_notify		= exynos_ufs_hce_enable_notify,
 	.link_startup_notify		= exynos_ufs_link_startup_notify,
+	.negotiate_pwr_mode		= exynos_ufs_negotiate_pwr_mode,
 	.pwr_change_notify		= exynos_ufs_pwr_change_notify,
 	.setup_clocks			= exynos_ufs_setup_clocks,
 	.setup_xfer_req			= exynos_ufs_specify_nexus_t_xfer_req,
diff --git a/drivers/ufs/host/ufs-hisi.c b/drivers/ufs/host/ufs-hisi.c
index 6f2e6bf31225..993e20ac211d 100644
--- a/drivers/ufs/host/ufs-hisi.c
+++ b/drivers/ufs/host/ufs-hisi.c
@@ -298,6 +298,17 @@ static void ufs_hisi_set_dev_cap(struct ufs_host_params *host_params)
 	ufshcd_init_host_params(host_params);
 }
 
+static int ufs_hisi_negotiate_pwr_mode(struct ufs_hba *hba,
+				       const struct ufs_pa_layer_attr *dev_max_params,
+				       struct ufs_pa_layer_attr *dev_req_params)
+{
+	struct ufs_host_params host_params;
+
+	ufs_hisi_set_dev_cap(&host_params);
+
+	return ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params);
+}
+
 static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba)
 {
 	struct ufs_hisi_host *host = ufshcd_get_variant(hba);
@@ -362,10 +373,8 @@ static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba)
 
 static int ufs_hisi_pwr_change_notify(struct ufs_hba *hba,
 				enum ufs_notify_change_status status,
-				const struct ufs_pa_layer_attr *dev_max_params,
 				struct ufs_pa_layer_attr *dev_req_params)
 {
-	struct ufs_host_params host_params;
 	int ret = 0;
 
 	if (!dev_req_params) {
@@ -377,14 +386,6 @@ static int ufs_hisi_pwr_change_notify(struct ufs_hba *hba,
 
 	switch (status) {
 	case PRE_CHANGE:
-		ufs_hisi_set_dev_cap(&host_params);
-		ret = ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params);
-		if (ret) {
-			dev_err(hba->dev,
-			    "%s: failed to determine capabilities\n", __func__);
-			goto out;
-		}
-
 		ufs_hisi_pwr_change_pre_change(hba);
 		break;
 	case POST_CHANGE:
@@ -543,6 +544,7 @@ static const struct ufs_hba_variant_ops ufs_hba_hi3660_vops = {
 	.name = "hi3660",
 	.init = ufs_hi3660_init,
 	.link_startup_notify = ufs_hisi_link_startup_notify,
+	.negotiate_pwr_mode = ufs_hisi_negotiate_pwr_mode,
 	.pwr_change_notify = ufs_hisi_pwr_change_notify,
 	.suspend = ufs_hisi_suspend,
 	.resume = ufs_hisi_resume,
@@ -552,6 +554,7 @@ static const struct ufs_hba_variant_ops ufs_hba_hi3670_vops = {
 	.name = "hi3670",
 	.init = ufs_hi3670_init,
 	.link_startup_notify = ufs_hisi_link_startup_notify,
+	.negotiate_pwr_mode = ufs_hisi_negotiate_pwr_mode,
 	.pwr_change_notify = ufs_hisi_pwr_change_notify,
 	.suspend = ufs_hisi_suspend,
 	.resume = ufs_hisi_resume,
diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c
index b3daaa07e925..3991a51263a6 100644
--- a/drivers/ufs/host/ufs-mediatek.c
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -1317,6 +1317,23 @@ static int ufs_mtk_init(struct ufs_hba *hba)
 	return err;
 }
 
+static int ufs_mtk_negotiate_pwr_mode(struct ufs_hba *hba,
+				      const struct ufs_pa_layer_attr *dev_max_params,
+				      struct ufs_pa_layer_attr *dev_req_params)
+{
+	struct ufs_host_params host_params;
+
+	ufshcd_init_host_params(&host_params);
+	host_params.hs_rx_gear = UFS_HS_G5;
+	host_params.hs_tx_gear = UFS_HS_G5;
+
+	if (dev_max_params->pwr_rx == SLOW_MODE ||
+	    dev_max_params->pwr_tx == SLOW_MODE)
+		host_params.desired_working_mode = UFS_PWM_MODE;
+
+	return ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params);
+}
+
 static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba,
 				     struct ufs_pa_layer_attr *dev_req_params)
 {
@@ -1372,26 +1389,10 @@ static void ufs_mtk_adjust_sync_length(struct ufs_hba *hba)
 }
 
 static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
-				const struct ufs_pa_layer_attr *dev_max_params,
 				struct ufs_pa_layer_attr *dev_req_params)
 {
 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
-	struct ufs_host_params host_params;
-	int ret;
-
-	ufshcd_init_host_params(&host_params);
-	host_params.hs_rx_gear = UFS_HS_G5;
-	host_params.hs_tx_gear = UFS_HS_G5;
-
-	if (dev_max_params->pwr_rx == SLOW_MODE ||
-	    dev_max_params->pwr_tx == SLOW_MODE)
-		host_params.desired_working_mode = UFS_PWM_MODE;
-
-	ret = ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params);
-	if (ret) {
-		pr_info("%s: failed to determine capabilities\n",
-			__func__);
-	}
+	int ret = 0;
 
 	if (ufs_mtk_pmc_via_fastauto(hba, dev_req_params)) {
 		ufs_mtk_adjust_sync_length(hba);
@@ -1503,7 +1504,6 @@ static int ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
 
 static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
 				enum ufs_notify_change_status stage,
-				const struct ufs_pa_layer_attr *dev_max_params,
 				struct ufs_pa_layer_attr *dev_req_params)
 {
 	int ret = 0;
@@ -1515,8 +1515,7 @@ static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
 			reg = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
 			ufs_mtk_auto_hibern8_disable(hba);
 		}
-		ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
-					     dev_req_params);
+		ret = ufs_mtk_pre_pwr_change(hba, dev_req_params);
 		break;
 	case POST_CHANGE:
 		if (ufshcd_is_auto_hibern8_supported(hba))
@@ -1960,6 +1959,8 @@ static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
 
 static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
 {
+	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
 	ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);
 
 	if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc) {
@@ -1971,6 +1972,15 @@ static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
 		hba->dev_quirks &= ~UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM;
 	}
 
+	/*
+	 * Add a delay after enabling UFS5 VCC to ensure the voltage
+	 * is stable before the refclk is enabled.
+	 */
+	if (hba->dev_info.wspecversion >= 0x0500 &&
+	    (host->ip_ver == IP_VER_MT6995_A0 ||
+	     host->ip_ver == IP_VER_MT6995_B0))
+		hba->quirks |= UFSHCD_QUIRK_VCC_ON_DELAY;
+
 	ufs_mtk_vreg_fix_vcc(hba);
 	ufs_mtk_vreg_fix_vccqx(hba);
 	ufs_mtk_fix_ahit(hba);
@@ -2318,6 +2328,7 @@ static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
 	.setup_clocks        = ufs_mtk_setup_clocks,
 	.hce_enable_notify   = ufs_mtk_hce_enable_notify,
 	.link_startup_notify = ufs_mtk_link_startup_notify,
+	.negotiate_pwr_mode  = ufs_mtk_negotiate_pwr_mode,
 	.pwr_change_notify   = ufs_mtk_pwr_change_notify,
 	.apply_dev_quirks    = ufs_mtk_apply_dev_quirks,
 	.fixup_dev_quirks    = ufs_mtk_fixup_dev_quirks,
diff --git a/drivers/ufs/host/ufs-mediatek.h b/drivers/ufs/host/ufs-mediatek.h
index 9747277f11e8..8547a6f04990 100644
--- a/drivers/ufs/host/ufs-mediatek.h
+++ b/drivers/ufs/host/ufs-mediatek.h
@@ -220,6 +220,10 @@ enum {
 	IP_VER_MT6991_B0 = 0x10470000,
 	IP_VER_MT6993    = 0x10480000,
 
+	/* UFSHCI 5.0 */
+	IP_VER_MT6995_A0 = 0x10490000,
+	IP_VER_MT6995_B0 = 0x10500000,
+
 	IP_VER_NONE      = 0xFFFFFFFF
 };
 
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 375fd24ba458..5a58ffef3d27 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -966,13 +966,21 @@ static void ufs_qcom_set_tx_hs_equalizer(struct ufs_hba *hba, u32 gear, u32 tx_l
 	}
 }
 
-static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
-				enum ufs_notify_change_status status,
-				const struct ufs_pa_layer_attr *dev_max_params,
-				struct ufs_pa_layer_attr *dev_req_params)
+static int ufs_qcom_negotiate_pwr_mode(struct ufs_hba *hba,
+				       const struct ufs_pa_layer_attr *dev_max_params,
+				       struct ufs_pa_layer_attr *dev_req_params)
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 	struct ufs_host_params *host_params = &host->host_params;
+
+	return ufshcd_negotiate_pwr_params(host_params, dev_max_params, dev_req_params);
+}
+
+static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
+				      enum ufs_notify_change_status status,
+				      struct ufs_pa_layer_attr *dev_req_params)
+{
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 	int ret = 0;
 
 	if (!dev_req_params) {
@@ -982,13 +990,6 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
 
 	switch (status) {
 	case PRE_CHANGE:
-		ret = ufshcd_negotiate_pwr_params(host_params, dev_max_params, dev_req_params);
-		if (ret) {
-			dev_err(hba->dev, "%s: failed to determine capabilities\n",
-					__func__);
-			return ret;
-		}
-
 		/*
 		 * During UFS driver probe, always update the PHY gear to match the negotiated
 		 * gear, so that, if quirk UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH is enabled,
@@ -1068,10 +1069,188 @@ static void ufs_qcom_override_pa_tx_hsg1_sync_len(struct ufs_hba *hba)
 		dev_err(hba->dev, "Failed (%d) set PA_TX_HSG1_SYNC_LENGTH\n", err);
 }
 
+/**
+ * ufs_qcom_double_t_adapt_l0l1l2l3 - Create a new adapt that doubles the
+ * adaptation duration TADAPT_L0_L1_L2_L3 derived from the old adapt.
+ *
+ * @old_adapt: Original ADAPT_L0_L1_L2_L3 capability
+ *
+ * ADAPT_length_L0_L1_L2_L3 formula from M-PHY spec:
+ * if (ADAPT_range_L0_L1_L2_L3 == COARSE) {
+ *   ADAPT_length_L0_L1_L2_L3 = [0, 12]
+ *   TADAPT_L0_L1_L2_L3 = 215 x 2^ADAPT_length_L0_L1_L2_L3
+ * } else if (ADAPT_range_L0_L1_L2_L3 == FINE) {
+ *   ADAPT_length_L0_L1_L2_L3 = [0, 127]
+ *   TADAPT_L0_L1_L2_L3 = 215 x (ADAPT_length_L0_L1_L2_L3 + 1)
+ * }
+ *
+ * To double the adaptation duration TADAPT_L0_L1_L2_L3:
+ * 1. If adapt range is COARSE (1'b1), new adapt = old adapt + 1.
+ * 2. If adapt range is FINE (1'b0):
+ *   a) If old adapt length is < 64, (new adapt + 1) = 2 * (old adapt + 1).
+ *   b) If old adapt length is >= 64, set new adapt to 0x88 using COARSE
+ *      range, because new adapt get from equation in a) shall exceed 127.
+ *
+ * Examples:
+ * ADAPT_range_L0_L1_L2_L3 | ADAPT_length_L0_L1_L2_L3 | TADAPT_L0_L1_L2_L3 (PAM-4 UI)
+ *		0			3			131072
+ *		0			7			262144
+ *		0			63			2097152
+ *		0			64			2129920
+ *		0			127			4194304
+ *		1			8			8388608
+ *		1			9			16777216
+ *		1			10			33554432
+ *		1			11			67108864
+ *		1			12			134217728
+ *
+ * Return: new adapt.
+ */
+static u32 ufs_qcom_double_t_adapt_l0l1l2l3(u32 old_adapt)
+{
+	u32 adapt_length = old_adapt & ADAPT_LENGTH_MASK;
+	u32 new_adapt;
+
+	if (IS_ADAPT_RANGE_COARSE(old_adapt)) {
+		new_adapt = (adapt_length + 1) | ADAPT_RANGE_BIT;
+	} else {
+		if (adapt_length < 64)
+			new_adapt = (adapt_length << 1) + 1;
+		else
+			/*
+			 * 0x88 is the very coarse Adapt value which is two
+			 * times of the largest fine Adapt value (0x7F)
+			 */
+			new_adapt = 0x88;
+	}
+
+	return new_adapt;
+}
+
+static void ufs_qcom_limit_max_gear(struct ufs_hba *hba,
+				    enum ufs_hs_gear_tag gear)
+{
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+	struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
+	struct ufs_host_params *host_params = &host->host_params;
+
+	host_params->hs_tx_gear = gear;
+	host_params->hs_rx_gear = gear;
+	pwr_info->gear_tx = gear;
+	pwr_info->gear_rx = gear;
+
+	dev_warn(hba->dev, "Limited max gear of host and device to HS-G%d\n", gear);
+}
+
+static void ufs_qcom_fixup_tx_adapt_l0l1l2l3(struct ufs_hba *hba)
+{
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+	struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
+	struct ufs_host_params *host_params = &host->host_params;
+	u32 old_adapt, new_adapt, actual_adapt;
+	bool limit_speed = false;
+	int err;
+
+	if (host->hw_ver.major != 0x7 || host->hw_ver.minor > 0x1 ||
+	    host_params->hs_tx_gear <= UFS_HS_G5 ||
+	    pwr_info->gear_tx <= UFS_HS_G5)
+		return;
+
+	err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PEERRXHSG6ADAPTINITIALL0L1L2L3), &old_adapt);
+	if (err)
+		goto out;
+
+	if (old_adapt > ADAPT_L0L1L2L3_LENGTH_MAX) {
+		dev_err(hba->dev, "PA_PeerRxHsG6AdaptInitialL0L1L2L3 value (0x%x) exceeds MAX\n",
+			old_adapt);
+		err = -ERANGE;
+		goto out;
+	}
+
+	new_adapt = ufs_qcom_double_t_adapt_l0l1l2l3(old_adapt);
+	dev_dbg(hba->dev, "Original PA_PeerRxHsG6AdaptInitialL0L1L2L3 = 0x%x, new value = 0x%x\n",
+		old_adapt, new_adapt);
+
+	/*
+	 * 0x8C is the max possible value allowed by UniPro v3.0 spec, some HWs
+	 * can accept 0x8D but some cannot.
+	 */
+	if (new_adapt <= ADAPT_L0L1L2L3_LENGTH_MAX ||
+	    (new_adapt == ADAPT_L0L1L2L3_LENGTH_MAX + 1 && host->hw_ver.minor == 0x1)) {
+		err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PEERRXHSG6ADAPTINITIALL0L1L2L3),
+				     new_adapt);
+		if (err)
+			goto out;
+
+		err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PEERRXHSG6ADAPTINITIALL0L1L2L3),
+				     &actual_adapt);
+		if (err)
+			goto out;
+
+		if (actual_adapt != new_adapt) {
+			limit_speed = true;
+			dev_warn(hba->dev, "PA_PeerRxHsG6AdaptInitialL0L1L2L3 0x%x, expect 0x%x\n",
+				 actual_adapt, new_adapt);
+		}
+	} else {
+		limit_speed = true;
+		dev_warn(hba->dev, "New PA_PeerRxHsG6AdaptInitialL0L1L2L3 (0x%x) is too large!\n",
+			 new_adapt);
+	}
+
+	err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PEERRXHSG6ADAPTREFRESHL0L1L2L3), &old_adapt);
+	if (err)
+		goto out;
+
+	if (old_adapt > ADAPT_L0L1L2L3_LENGTH_MAX) {
+		dev_err(hba->dev, "PA_PeerRxHsG6AdaptRefreshL0L1L2L3 value (0x%x) exceeds MAX\n",
+			old_adapt);
+		err = -ERANGE;
+		goto out;
+	}
+
+	new_adapt = ufs_qcom_double_t_adapt_l0l1l2l3(old_adapt);
+	dev_dbg(hba->dev, "Original PA_PeerRxHsG6AdaptRefreshL0L1L2L3 = 0x%x, new value = 0x%x\n",
+		old_adapt, new_adapt);
+
+	/*
+	 * 0x8C is the max possible value allowed by UniPro v3.0 spec, some HWs
+	 * can accept 0x8D but some cannot.
+	 */
+	if (new_adapt <= ADAPT_L0L1L2L3_LENGTH_MAX ||
+	    (new_adapt == ADAPT_L0L1L2L3_LENGTH_MAX + 1 && host->hw_ver.minor == 0x1)) {
+		err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PEERRXHSG6ADAPTREFRESHL0L1L2L3),
+				     new_adapt);
+		if (err)
+			goto out;
+
+		err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PEERRXHSG6ADAPTREFRESHL0L1L2L3),
+				     &actual_adapt);
+		if (err)
+			goto out;
+
+		if (actual_adapt != new_adapt) {
+			limit_speed = true;
+			dev_warn(hba->dev, "PA_PeerRxHsG6AdaptRefreshL0L1L2L3 0x%x, expect 0x%x\n",
+				 actual_adapt, new_adapt);
+		}
+	} else {
+		limit_speed = true;
+		dev_warn(hba->dev, "New PA_PeerRxHsG6AdaptRefreshL0L1L2L3 (0x%x) is too large!\n",
+			 new_adapt);
+	}
+
+out:
+	if (limit_speed || err)
+		ufs_qcom_limit_max_gear(hba, UFS_HS_G5);
+}
+
 static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
 {
 	int err = 0;
 
+	ufs_qcom_fixup_tx_adapt_l0l1l2l3(hba);
+
 	if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
 		err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);
 
@@ -1205,6 +1384,8 @@ static void ufs_qcom_set_host_caps(struct ufs_hba *hba)
 
 static void ufs_qcom_set_caps(struct ufs_hba *hba)
 {
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
 	hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
 	hba->caps |= UFSHCD_CAP_CLK_SCALING | UFSHCD_CAP_WB_WITH_CLK_SCALING;
 	hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
@@ -1212,6 +1393,9 @@ static void ufs_qcom_set_caps(struct ufs_hba *hba)
 	hba->caps |= UFSHCD_CAP_AGGR_POWER_COLLAPSE;
 	hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
 
+	if (host->hw_ver.major >= 0x7)
+		hba->caps |= UFSHCD_CAP_TX_EQUALIZATION;
+
 	ufs_qcom_set_host_caps(hba);
 }
 
@@ -2326,6 +2510,387 @@ static u32 ufs_qcom_freq_to_gear_speed(struct ufs_hba *hba, unsigned long freq)
 	return min_t(u32, gear, hba->max_pwr_info.info.gear_rx);
 }
 
+static int ufs_qcom_host_eom_config(struct ufs_hba *hba, int lane,
+				    const struct ufs_eom_coord *eom_coord,
+				    u32 target_test_count)
+{
+	enum ufs_eom_eye_mask eye_mask = eom_coord->eye_mask;
+	int v_step = eom_coord->v_step;
+	int t_step = eom_coord->t_step;
+	u32 volt_step, timing_step;
+	int ret;
+
+	if (abs(v_step) > UFS_QCOM_EOM_VOLTAGE_STEPS_MAX) {
+		dev_err(hba->dev, "Invalid EOM Voltage Step: %d\n", v_step);
+		return -ERANGE;
+	}
+
+	if (abs(t_step) > UFS_QCOM_EOM_TIMING_STEPS_MAX) {
+		dev_err(hba->dev, "Invalid EOM Timing Step: %d\n", t_step);
+		return -ERANGE;
+	}
+
+	if (v_step < 0)
+		volt_step = RX_EYEMON_NEGATIVE_STEP_BIT | (u32)(-v_step);
+	else
+		volt_step = (u32)v_step;
+
+	if (t_step < 0)
+		timing_step = RX_EYEMON_NEGATIVE_STEP_BIT | (u32)(-t_step);
+	else
+		timing_step = (u32)t_step;
+
+	ret = ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_EYEMON_ENABLE,
+				UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)),
+			     BIT(eye_mask) | RX_EYEMON_EXTENDED_VRANGE_BIT);
+	if (ret) {
+		dev_err(hba->dev, "Failed to enable Host EOM on Lane %d: %d\n",
+			lane, ret);
+		return ret;
+	}
+
+	ret = ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_EYEMON_TIMING_STEPS,
+				UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)),
+			     timing_step);
+	if (ret) {
+		dev_err(hba->dev, "Failed to set Host EOM timing step on Lane %d: %d\n",
+			lane, ret);
+		return ret;
+	}
+
+	ret = ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_EYEMON_VOLTAGE_STEPS,
+				UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)),
+			     volt_step);
+	if (ret) {
+		dev_err(hba->dev, "Failed to set Host EOM voltage step on Lane %d: %d\n",
+			lane, ret);
+		return ret;
+	}
+
+	ret = ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_EYEMON_TARGET_TEST_COUNT,
+				UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)),
+			     target_test_count);
+	if (ret)
+		dev_err(hba->dev, "Failed to set Host EOM target test count on Lane %d: %d\n",
+			lane, ret);
+
+	return ret;
+}
+
+static int ufs_qcom_host_eom_may_stop(struct ufs_hba *hba, int lane,
+				      u32 target_test_count, u32 *err_count)
+{
+	u32 start, tested_count, error_count;
+	int ret;
+
+	ret = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(RX_EYEMON_START,
+				UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)),
+			     &start);
+	if (ret) {
+		dev_err(hba->dev, "Failed to get Host EOM start status on Lane %d: %d\n",
+			lane, ret);
+		return ret;
+	}
+
+	if (start & 0x1)
+		return -EAGAIN;
+
+	ret = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(RX_EYEMON_TESTED_COUNT,
+				UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)),
+			     &tested_count);
+	if (ret) {
+		dev_err(hba->dev, "Failed to get Host EOM tested count on Lane %d: %d\n",
+			lane, ret);
+		return ret;
+	}
+
+	ret = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(RX_EYEMON_ERROR_COUNT,
+				UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)),
+			     &error_count);
+	if (ret) {
+		dev_err(hba->dev, "Failed to get Host EOM error count on Lane %d: %d\n",
+			lane, ret);
+		return ret;
+	}
+
+	/* EOM can stop */
+	if ((tested_count >= target_test_count - 3) || error_count > 0) {
+		*err_count = error_count;
+
+		/* Disable EOM */
+		ret = ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_EYEMON_ENABLE,
+					UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)),
+				     0x0);
+		if (ret) {
+			dev_err(hba->dev, "Failed to disable Host EOM on Lane %d: %d\n",
+				lane, ret);
+			return ret;
+		}
+	} else {
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+
+static int ufs_qcom_host_eom_scan(struct ufs_hba *hba, int num_lanes,
+				  const struct ufs_eom_coord *eom_coord,
+				  u32 target_test_count, u32 *err_count)
+{
+	bool eom_stopped[PA_MAXDATALANES] = { 0 };
+	int lane, ret;
+	u32 setting;
+
+	if (!err_count || !eom_coord)
+		return -EINVAL;
+
+	if (target_test_count < UFS_QCOM_EOM_TARGET_TEST_COUNT_MIN) {
+		dev_err(hba->dev, "Target test count (%u) too small for Host EOM\n",
+			target_test_count);
+		return -ERANGE;
+	}
+
+	for (lane = 0; lane < num_lanes; lane++) {
+		ret = ufs_qcom_host_eom_config(hba, lane, eom_coord,
+					       target_test_count);
+		if (ret) {
+			dev_err(hba->dev, "Failed to config Host RX EOM: %d\n", ret);
+			return ret;
+		}
+	}
+
+	/*
+	 * Trigger a PACP_PWR_req to kick start EOM, but not to really change
+	 * the Power Mode.
+	 */
+	ret = ufshcd_uic_change_pwr_mode(hba, FAST_MODE << 4 | FAST_MODE);
+	if (ret) {
+		dev_err(hba->dev, "Failed to change power mode to kick start Host EOM: %d\n",
+			ret);
+		return ret;
+	}
+
+more_burst:
+	/* Create burst on Host RX Lane. */
+	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &setting);
+
+	for (lane = 0; lane < num_lanes; lane++) {
+		if (eom_stopped[lane])
+			continue;
+
+		ret = ufs_qcom_host_eom_may_stop(hba, lane, target_test_count,
+						 &err_count[lane]);
+		if (!ret) {
+			eom_stopped[lane] = true;
+		} else if (ret == -EAGAIN) {
+			/* Need more burst to exercise EOM */
+			goto more_burst;
+		} else {
+			dev_err(hba->dev, "Failed to stop Host EOM: %d\n", ret);
+			return ret;
+		}
+
+		dev_dbg(hba->dev, "Host RX Lane %d EOM, v_step %d, t_step %d, error count %u\n",
+			lane, eom_coord->v_step, eom_coord->t_step,
+			err_count[lane]);
+	}
+
+	return 0;
+}
+
+static int ufs_qcom_host_sw_rx_fom(struct ufs_hba *hba, int num_lanes, u32 *fom)
+{
+	const struct ufs_eom_coord *eom_coord = sw_rx_fom_eom_coords_g6;
+	u32 eom_err_count[PA_MAXDATALANES] = { 0 };
+	u32 curr_ahit;
+	int lane, i, ret;
+
+	if (!fom)
+		return -EINVAL;
+
+	/* Stop the auto hibernate idle timer */
+	curr_ahit = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
+	if (curr_ahit)
+		ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);
+
+	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE), PA_NO_ADAPT);
+	if (ret) {
+		dev_err(hba->dev, "Failed to select NO_ADAPT before starting Host EOM: %d\n", ret);
+		goto out;
+	}
+
+	for (i = 0; i < SW_RX_FOM_EOM_COORDS; i++, eom_coord++) {
+		ret = ufs_qcom_host_eom_scan(hba, num_lanes, eom_coord,
+					     UFS_QCOM_EOM_TARGET_TEST_COUNT_G6,
+					     eom_err_count);
+		if (ret) {
+			dev_err(hba->dev, "Failed to run Host EOM scan: %d\n", ret);
+			break;
+		}
+
+		for (lane = 0; lane < num_lanes; lane++) {
+			/* Bad coordinates have no weights */
+			if (eom_err_count[lane])
+				continue;
+			fom[lane] += SW_RX_FOM_EOM_COORDS_WEIGHT;
+		}
+	}
+
+out:
+	/* Restore the auto hibernate idle timer */
+	if (curr_ahit)
+		ufshcd_writel(hba, curr_ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
+
+	return ret;
+}
+
+static int ufs_qcom_get_rx_fom(struct ufs_hba *hba,
+			       struct ufs_pa_layer_attr *pwr_mode,
+			       struct tx_eqtr_iter *h_iter,
+			       struct tx_eqtr_iter *d_iter)
+{
+	struct ufshcd_tx_eq_params *params __free(kfree) =
+		kzalloc(sizeof(*params), GFP_KERNEL);
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+	struct ufs_pa_layer_attr old_pwr_info;
+	u32 fom[PA_MAXDATALANES] = { 0 };
+	u32 gear = pwr_mode->gear_tx;
+	u32 rate = pwr_mode->hs_rate;
+	int lane, ret;
+
+	if (host->hw_ver.major != 0x7 || host->hw_ver.minor > 0x1 ||
+	    gear <= UFS_HS_G5 || !d_iter || !d_iter->is_updated)
+		return 0;
+
+	if (gear < UFS_HS_G1 || gear > UFS_HS_GEAR_MAX)
+		return -ERANGE;
+
+	if (!params)
+		return -ENOMEM;
+
+	memcpy(&old_pwr_info, &hba->pwr_info, sizeof(struct ufs_pa_layer_attr));
+
+	memcpy(params, &hba->tx_eq_params[gear - 1], sizeof(struct ufshcd_tx_eq_params));
+	for (lane = 0; lane < pwr_mode->lane_rx; lane++) {
+		params->device[lane].preshoot = d_iter->preshoot;
+		params->device[lane].deemphasis = d_iter->deemphasis;
+	}
+
+	/* Use TX EQTR settings as Device's TX Equalization settings. */
+	ret = ufshcd_apply_tx_eq_settings(hba, params, gear);
+	if (ret) {
+		dev_err(hba->dev, "%s: Failed to apply TX EQ settings for HS-G%u: %d\n",
+			__func__, gear, ret);
+		return ret;
+	}
+
+	/* Force PMC to target HS Gear to use new TX Equalization settings. */
+	ret = ufshcd_change_power_mode(hba, pwr_mode, UFSHCD_PMC_POLICY_FORCE);
+	if (ret) {
+		dev_err(hba->dev, "%s: Failed to change power mode to HS-G%u, Rate-%s: %d\n",
+			__func__, gear, ufs_hs_rate_to_str(rate), ret);
+		return ret;
+	}
+
+	ret = ufs_qcom_host_sw_rx_fom(hba, pwr_mode->lane_rx, fom);
+	if (ret) {
+		dev_err(hba->dev, "Failed to get SW FOM of TX (PreShoot: %u, DeEmphasis: %u): %d\n",
+			d_iter->preshoot, d_iter->deemphasis, ret);
+		return ret;
+	}
+
+	/* Restore Device's TX Equalization settings. */
+	ret = ufshcd_apply_tx_eq_settings(hba, &hba->tx_eq_params[gear - 1], gear);
+	if (ret) {
+		dev_err(hba->dev, "%s: Failed to apply TX EQ settings for HS-G%u: %d\n",
+			__func__, gear, ret);
+		return ret;
+	}
+
+	/* Restore Power Mode. */
+	ret = ufshcd_change_power_mode(hba, &old_pwr_info, UFSHCD_PMC_POLICY_FORCE);
+	if (ret) {
+		dev_err(hba->dev, "%s: Failed to restore power mode to HS-G%u: %d\n",
+			__func__, old_pwr_info.gear_tx, ret);
+		return ret;
+	}
+
+	for (lane = 0; lane < pwr_mode->lane_rx; lane++)
+		d_iter->fom[lane] = fom[lane];
+
+	return 0;
+}
+
+static int ufs_qcom_apply_tx_eqtr_settings(struct ufs_hba *hba,
+					   struct ufs_pa_layer_attr *pwr_mode,
+					   struct tx_eqtr_iter *h_iter,
+					   struct tx_eqtr_iter *d_iter)
+{
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+	u32 setting = 0;
+	int lane;
+
+	if (host->hw_ver.major != 0x7 || host->hw_ver.minor > 0x1)
+		return 0;
+
+	for (lane = 0; lane < pwr_mode->lane_tx; lane++) {
+		setting |= TX_HS_PRESHOOT_BITS(lane, h_iter->preshoot);
+		setting |= TX_HS_DEEMPHASIS_BITS(lane, h_iter->deemphasis);
+	}
+
+	return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXEQG1SETTING), setting);
+}
+
+static int ufs_qcom_tx_eqtr_notify(struct ufs_hba *hba,
+				   enum ufs_notify_change_status status,
+				   struct ufs_pa_layer_attr *pwr_mode)
+{
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+	struct ufs_pa_layer_attr pwr_mode_hs_g1 = {
+		.gear_rx = UFS_HS_G1,
+		.gear_tx = UFS_HS_G1,
+		.lane_rx = pwr_mode->lane_rx,
+		.lane_tx = pwr_mode->lane_tx,
+		.pwr_rx = FAST_MODE,
+		.pwr_tx = FAST_MODE,
+		.hs_rate = pwr_mode->hs_rate,
+	};
+	u32 gear = pwr_mode->gear_tx;
+	u32 rate = pwr_mode->hs_rate;
+	int ret;
+
+	if (host->hw_ver.major != 0x7 || host->hw_ver.minor > 0x1)
+		return 0;
+
+	if (status == PRE_CHANGE) {
+		ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TXEQG1SETTING),
+				     &host->saved_tx_eq_g1_setting);
+		if (ret)
+			return ret;
+
+		/* PMC to target HS Gear. */
+		ret = ufshcd_change_power_mode(hba, pwr_mode,
+					       UFSHCD_PMC_POLICY_DONT_FORCE);
+		if (ret)
+			dev_err(hba->dev, "%s: Failed to PMC to target HS-G%u, Rate-%s: %d\n",
+				__func__, gear, ufs_hs_rate_to_str(rate), ret);
+	} else {
+		ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXEQG1SETTING),
+				     host->saved_tx_eq_g1_setting);
+		if (ret)
+			return ret;
+
+		/* PMC back to HS-G1. */
+		ret = ufshcd_change_power_mode(hba, &pwr_mode_hs_g1,
+					       UFSHCD_PMC_POLICY_DONT_FORCE);
+		if (ret)
+			dev_err(hba->dev, "%s: Failed to PMC to HS-G1, Rate-%s: %d\n",
+				__func__, ufs_hs_rate_to_str(rate), ret);
+	}
+
+	return ret;
+}
+
 /*
  * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
  *
@@ -2341,6 +2906,7 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
 	.setup_clocks           = ufs_qcom_setup_clocks,
 	.hce_enable_notify      = ufs_qcom_hce_enable_notify,
 	.link_startup_notify    = ufs_qcom_link_startup_notify,
+	.negotiate_pwr_mode	= ufs_qcom_negotiate_pwr_mode,
 	.pwr_change_notify	= ufs_qcom_pwr_change_notify,
 	.apply_dev_quirks	= ufs_qcom_apply_dev_quirks,
 	.fixup_dev_quirks       = ufs_qcom_fixup_dev_quirks,
@@ -2355,6 +2921,9 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
 	.get_outstanding_cqs	= ufs_qcom_get_outstanding_cqs,
 	.config_esi		= ufs_qcom_config_esi,
 	.freq_to_gear_speed	= ufs_qcom_freq_to_gear_speed,
+	.get_rx_fom		= ufs_qcom_get_rx_fom,
+	.apply_tx_eqtr_settings	= ufs_qcom_apply_tx_eqtr_settings,
+	.tx_eqtr_notify		= ufs_qcom_tx_eqtr_notify,
 };
 
 static const struct ufs_hba_variant_ops ufs_hba_qcom_sa8255p_vops = {
diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h
index 1111ab34da01..5d083331a7f4 100644
--- a/drivers/ufs/host/ufs-qcom.h
+++ b/drivers/ufs/host/ufs-qcom.h
@@ -33,6 +33,46 @@
 #define DL_VS_CLK_CFG_MASK GENMASK(9, 0)
 #define DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN             BIT(9)
 
+#define UFS_QCOM_EOM_VOLTAGE_STEPS_MAX		127
+#define UFS_QCOM_EOM_TIMING_STEPS_MAX		63
+#define UFS_QCOM_EOM_TARGET_TEST_COUNT_MIN	8
+#define UFS_QCOM_EOM_TARGET_TEST_COUNT_G6	0x3F
+
+#define SW_RX_FOM_EOM_COORDS		23
+#define SW_RX_FOM_EOM_COORDS_WEIGHT	(127 / SW_RX_FOM_EOM_COORDS)
+
+struct ufs_eom_coord {
+	int t_step;
+	int v_step;
+	u8 eye_mask;
+};
+
+static const struct ufs_eom_coord sw_rx_fom_eom_coords_g6[SW_RX_FOM_EOM_COORDS] = {
+	[0] = { -2, -15, UFS_EOM_EYE_MASK_M },
+	[1] = { 0, -15, UFS_EOM_EYE_MASK_M },
+	[2] = { 2, -15, UFS_EOM_EYE_MASK_M },
+	[3] = { -4, -10, UFS_EOM_EYE_MASK_M },
+	[4] = { -2, -10, UFS_EOM_EYE_MASK_M },
+	[5] = { 0, -10, UFS_EOM_EYE_MASK_M },
+	[6] = { 2, -10, UFS_EOM_EYE_MASK_M },
+	[7] = { 4, -10, UFS_EOM_EYE_MASK_M },
+	[8] = { -6, 0, UFS_EOM_EYE_MASK_M },
+	[9] = { -4, 0, UFS_EOM_EYE_MASK_M },
+	[10] = { -2, 0, UFS_EOM_EYE_MASK_M },
+	[11] = { 0, 0, UFS_EOM_EYE_MASK_M },
+	[12] = { 2, 0, UFS_EOM_EYE_MASK_M },
+	[13] = { 4, 0, UFS_EOM_EYE_MASK_M },
+	[14] = { 6, 0, UFS_EOM_EYE_MASK_M },
+	[15] = { -4, 10, UFS_EOM_EYE_MASK_M },
+	[16] = { -2, 10, UFS_EOM_EYE_MASK_M },
+	[17] = { 0, 10, UFS_EOM_EYE_MASK_M },
+	[18] = { 2, 10, UFS_EOM_EYE_MASK_M },
+	[19] = { 4, 10, UFS_EOM_EYE_MASK_M },
+	[20] = { -2, 15, UFS_EOM_EYE_MASK_M },
+	[21] = { 0, 15, UFS_EOM_EYE_MASK_M },
+	[22] = { 2, 15, UFS_EOM_EYE_MASK_M },
+};
+
 /* Qualcomm MCQ Configuration */
 #define UFS_QCOM_MCQCAP_QCFGPTR     224  /* 0xE0 in hex */
 #define UFS_QCOM_MCQ_CONFIG_OFFSET  (UFS_QCOM_MCQCAP_QCFGPTR * 0x200)  /* 0x1C000 */
@@ -308,6 +348,8 @@ struct ufs_qcom_host {
 	u32 phy_gear;
 
 	bool esi_enabled;
+
+	u32 saved_tx_eq_g1_setting;
 };
 
 struct ufs_qcom_drvdata {
diff --git a/drivers/ufs/host/ufs-rockchip.c b/drivers/ufs/host/ufs-rockchip.c
index 7fff34513a60..bac68f238e1c 100644
--- a/drivers/ufs/host/ufs-rockchip.c
+++ b/drivers/ufs/host/ufs-rockchip.c
@@ -6,7 +6,6 @@
  */
 
 #include <linux/clk.h>
-#include <linux/gpio.h>
 #include <linux/gpio/consumer.h>
 #include <linux/mfd/syscon.h>
 #include <linux/of.h>
diff --git a/drivers/ufs/host/ufs-sprd.c b/drivers/ufs/host/ufs-sprd.c
index 65bd8fb96b99..a5e8c591bead 100644
--- a/drivers/ufs/host/ufs-sprd.c
+++ b/drivers/ufs/host/ufs-sprd.c
@@ -161,14 +161,11 @@ static int ufs_sprd_common_init(struct ufs_hba *hba)
 
 static int sprd_ufs_pwr_change_notify(struct ufs_hba *hba,
 				enum ufs_notify_change_status status,
-				const struct ufs_pa_layer_attr *dev_max_params,
 				struct ufs_pa_layer_attr *dev_req_params)
 {
 	struct ufs_sprd_host *host = ufshcd_get_variant(hba);
 
 	if (status == PRE_CHANGE) {
-		memcpy(dev_req_params, dev_max_params,
-			sizeof(struct ufs_pa_layer_attr));
 		if (host->unipro_ver >= UFS_UNIPRO_VER_1_8)
 			ufshcd_dme_configure_adapt(hba, dev_req_params->gear_tx,
 						   PA_INITIAL_ADAPT);
diff --git a/drivers/ufs/host/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c
index 5f65dfad1a71..effa3c7a01c5 100644
--- a/drivers/ufs/host/ufshcd-pci.c
+++ b/drivers/ufs/host/ufshcd-pci.c
@@ -145,7 +145,8 @@ static int ufs_intel_set_lanes(struct ufs_hba *hba, u32 lanes)
 
 	pwr_info.lane_rx = lanes;
 	pwr_info.lane_tx = lanes;
-	ret = ufshcd_config_pwr_mode(hba, &pwr_info);
+	ret = ufshcd_change_power_mode(hba, &pwr_info,
+				       UFSHCD_PMC_POLICY_DONT_FORCE);
 	if (ret)
 		dev_err(hba->dev, "%s: Setting %u lanes, err = %d\n",
 			__func__, lanes, ret);
@@ -154,17 +155,15 @@ static int ufs_intel_set_lanes(struct ufs_hba *hba, u32 lanes)
 
 static int ufs_intel_lkf_pwr_change_notify(struct ufs_hba *hba,
 				enum ufs_notify_change_status status,
-				const struct ufs_pa_layer_attr *dev_max_params,
 				struct ufs_pa_layer_attr *dev_req_params)
 {
 	int err = 0;
 
 	switch (status) {
 	case PRE_CHANGE:
-		if (ufshcd_is_hs_mode(dev_max_params) &&
+		if (ufshcd_is_hs_mode(dev_req_params) &&
 		    (hba->pwr_info.lane_rx != 2 || hba->pwr_info.lane_tx != 2))
 			ufs_intel_set_lanes(hba, 2);
-		memcpy(dev_req_params, dev_max_params, sizeof(*dev_req_params));
 		break;
 	case POST_CHANGE:
 		if (ufshcd_is_hs_mode(dev_req_params)) {
@@ -695,6 +694,7 @@ static const struct pci_device_id ufshcd_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, 0x7747), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
 	{ PCI_VDEVICE(INTEL, 0xE447), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
 	{ PCI_VDEVICE(INTEL, 0x4D47), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
+	{ PCI_VDEVICE(INTEL, 0xD335), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
 	{ }	/* terminate list */
 };
 
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index ec050d8f99f1..8d36f6783f87 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -2016,6 +2016,7 @@ static const struct target_core_fabric_ops usbg_ops = {
 	.tfc_wwn_attrs			= usbg_wwn_attrs,
 	.tfc_tpg_base_attrs		= usbg_base_attrs,
 
+	.default_compl_type		= TARGET_QUEUE_COMPL,
 	.default_submit_type		= TARGET_DIRECT_SUBMIT,
 	.direct_submit_supp		= 1,
 };
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 1c22880e7226..9a1253b9d8c5 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -2950,6 +2950,8 @@ static const struct target_core_fabric_ops vhost_scsi_ops = {
 	.tfc_tpg_base_attrs		= vhost_scsi_tpg_attrs,
 	.tfc_tpg_attrib_attrs		= vhost_scsi_tpg_attrib_attrs,
 
+	.default_compl_type		= TARGET_QUEUE_COMPL,
+	.direct_compl_supp		= 1,
 	.default_submit_type		= TARGET_QUEUE_SUBMIT,
 	.direct_submit_supp		= 1,
 };
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index 3035c7d0f1b7..e33f95c91b09 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -1832,6 +1832,7 @@ static const struct target_core_fabric_ops scsiback_ops = {
 	.tfc_tpg_base_attrs		= scsiback_tpg_attrs,
 	.tfc_tpg_param_attrs		= scsiback_param_attrs,
 
+	.default_compl_type		= TARGET_QUEUE_COMPL,
 	.default_submit_type		= TARGET_DIRECT_SUBMIT,
 	.direct_submit_supp		= 1,
 };
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index d32f5841f4f8..9c2a7bbe5891 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -571,6 +571,7 @@ void scsi_put_internal_cmd(struct scsi_cmnd *scmd);
 extern void sdev_disable_disk_events(struct scsi_device *sdev);
 extern void sdev_enable_disk_events(struct scsi_device *sdev);
 extern int scsi_vpd_lun_id(struct scsi_device *, char *, size_t);
+extern int scsi_vpd_lun_serial(struct scsi_device *, char *, size_t);
 extern int scsi_vpd_tpg_id(struct scsi_device *, int *);
 
 #ifdef CONFIG_PM
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index f6e12565a81d..7e2011830ba4 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -660,6 +660,10 @@ struct Scsi_Host {
 	 */
 	unsigned nr_hw_queues;
 	unsigned nr_maps;
+
+	/* Asynchronous scan in progress */
+	bool async_scan __guarded_by(&scan_mutex);
+
 	unsigned active_mode:2;
 
 	/*
@@ -678,9 +682,6 @@ struct Scsi_Host {
 	/* Task mgmt function in progress */
 	unsigned tmf_in_progress:1;
 
-	/* Asynchronous scan in progress */
-	unsigned async_scan:1;
-
 	/* Don't resume host in EH */
 	unsigned eh_noresume:1;
 
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index b62d5fcce950..9a0e9f9e1ec4 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -111,6 +111,15 @@
 /* Peripheral Device Text Identification Information */
 #define PD_TEXT_ID_INFO_LEN			256
 
+enum target_compl_type {
+	/* Use the fabric driver's default completion type */
+	TARGET_FABRIC_DEFAULT_COMPL,
+	/* Complete from the backend calling context */
+	TARGET_DIRECT_COMPL,
+	/* Defer completion to the LIO workqueue */
+	TARGET_QUEUE_COMPL,
+};
+
 enum target_submit_type {
 	/* Use the fabric driver's default submission type */
 	TARGET_FABRIC_DEFAULT_SUBMIT,
@@ -741,6 +750,7 @@ struct se_dev_attrib {
 	u32		atomic_granularity;
 	u32		atomic_max_with_boundary;
 	u32		atomic_max_boundary;
+	u8		complete_type;
 	u8		submit_type;
 	struct se_device *da_dev;
 	struct config_group da_group;
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 3378ff9ee271..e9039e73d058 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -118,15 +118,21 @@ struct target_core_fabric_ops {
 	 * its entirety before a command is aborted.
 	 */
 	unsigned int write_pending_must_be_called:1;
+	/*
+	 * Set this if the driver does not require calling queue_data_in
+	 * queue_status and check_stop_free from a worker thread when
+	 * completing successful commands.
+	 */
+	unsigned int direct_compl_supp:1;
 	/*
 	 * Set this if the driver supports submitting commands to the backend
 	 * from target_submit/target_submit_cmd.
 	 */
 	unsigned int direct_submit_supp:1;
-	/*
-	 * Set this to a target_submit_type value.
-	 */
+	/* Set this to a target_submit_type value. */
 	u8 default_submit_type;
+	/* Set this to the target_compl_type value. */
+	u8 default_compl_type;
 };
 
 int target_register_template(const struct target_core_fabric_ops *fo);
diff --git a/include/uapi/scsi/fc/fc_els.h b/include/uapi/scsi/fc/fc_els.h
index 019096beb179..dca6a28f4e86 100644
--- a/include/uapi/scsi/fc/fc_els.h
+++ b/include/uapi/scsi/fc/fc_els.h
@@ -1030,7 +1030,7 @@ struct fc_fn_li_desc {
 					 */
 	__be32		event_count;	/* minimum number of event
 					 * occurrences during the event
-					 * threshold to caause the LI event
+					 * threshold to cause the LI event
 					 */
 	__be32		pname_count;	/* number of portname_list elements */
 	__be64		pname_list[];	/* list of N_Port_Names accessible
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index 8563b6648976..cfbc75d8df83 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -287,6 +287,86 @@ struct ufs_pwr_mode_info {
 	struct ufs_pa_layer_attr info;
 };
 
+#define UFS_MAX_LANES	2
+
+/**
+ * struct tx_eqtr_iter - TX Equalization Training iterator
+ * @preshoot_bitmap: PreShoot bitmap
+ * @deemphasis_bitmap: DeEmphasis bitmap
+ * @preshoot: PreShoot value
+ * @deemphasis: DeEmphasis value
+ * @fom: Figure-of-Merit read out from RX_FOM
+ * @is_updated: Flag to indicate if updated since previous iteration
+ */
+struct tx_eqtr_iter {
+	unsigned long preshoot_bitmap;
+	unsigned long deemphasis_bitmap;
+	u8 preshoot;
+	u8 deemphasis;
+	u8 fom[UFS_MAX_LANES];
+	bool is_updated;
+};
+
+/**
+ * struct ufshcd_tx_eq_settings - TX Equalization settings
+ * @preshoot: PreShoot value
+ * @deemphasis: DeEmphasis value
+ * @fom_val: Figure-of-Merit value read out from RX_FOM (Bit[6:0])
+ * @precode_en: Flag to indicate whether need to enable pre-coding
+ */
+struct ufshcd_tx_eq_settings {
+	u8 preshoot;
+	u8 deemphasis;
+	u8 fom_val;
+	bool precode_en;
+};
+
+/**
+ * struct ufshcd_tx_eqtr_data - Data used during TX Equalization Training procedure
+ * @host: Optimal TX EQ settings identified for host TX Lanes during TX EQTR
+ * @device: Optimal TX EQ settings identified for device TX Lanes during TX EQTR
+ * @host_fom: Host TX EQTR FOM record
+ * @device_fom: Device TX EQTR FOM record
+ */
+struct ufshcd_tx_eqtr_data {
+	struct ufshcd_tx_eq_settings host[UFS_MAX_LANES];
+	struct ufshcd_tx_eq_settings device[UFS_MAX_LANES];
+	u8 host_fom[UFS_MAX_LANES][TX_HS_NUM_PRESHOOT][TX_HS_NUM_DEEMPHASIS];
+	u8 device_fom[UFS_MAX_LANES][TX_HS_NUM_PRESHOOT][TX_HS_NUM_DEEMPHASIS];
+};
+
+/**
+ * struct ufshcd_tx_eqtr_record - TX Equalization Training record
+ * @host_fom: Host TX EQTR FOM record
+ * @device_fom: Device TX EQTR FOM record
+ * @last_record_ts: Timestamp of the most recent TX EQTR record
+ * @last_record_index: Index of the most recent TX EQTR record
+ * @saved_adapt_eqtr: Saved Adaptation length setting for TX EQTR
+ */
+struct ufshcd_tx_eqtr_record {
+	u8 host_fom[UFS_MAX_LANES][TX_HS_NUM_PRESHOOT][TX_HS_NUM_DEEMPHASIS];
+	u8 device_fom[UFS_MAX_LANES][TX_HS_NUM_PRESHOOT][TX_HS_NUM_DEEMPHASIS];
+	ktime_t last_record_ts;
+	u16 last_record_index;
+	u16 saved_adapt_eqtr;
+};
+
+/**
+ * struct ufshcd_tx_eq_params - TX Equalization parameters structure
+ * @host: TX EQ settings for host TX Lanes
+ * @device: TX EQ settings for device TX Lanes
+ * @eqtr_record: Pointer to TX EQTR record
+ * @is_valid: True if parameter contains valid TX Equalization settings
+ * @is_applied: True if settings have been applied to UniPro of both sides
+ */
+struct ufshcd_tx_eq_params {
+	struct ufshcd_tx_eq_settings host[UFS_MAX_LANES];
+	struct ufshcd_tx_eq_settings device[UFS_MAX_LANES];
+	struct ufshcd_tx_eqtr_record *eqtr_record;
+	bool is_valid;
+	bool is_applied;
+};
+
 /**
  * struct ufs_hba_variant_ops - variant specific callbacks
  * @name: variant name
@@ -302,11 +382,10 @@ struct ufs_pwr_mode_info {
  *                     variant specific Uni-Pro initialization.
  * @link_startup_notify: called before and after Link startup is carried out
  *                       to allow variant specific Uni-Pro initialization.
+ * @negotiate_pwr_mode: called to negotiate power mode.
  * @pwr_change_notify: called before and after a power mode change
  *			is carried out to allow vendor spesific capabilities
- *			to be set. PRE_CHANGE can modify final_params based
- *			on desired_pwr_mode, but POST_CHANGE must not alter
- *			the final_params parameter
+ *			to be set.
  * @setup_xfer_req: called before any transfer request is issued
  *                  to set some things
  * @setup_task_mgmt: called before any task management request is issued
@@ -331,6 +410,11 @@ struct ufs_pwr_mode_info {
  * @config_esi: called to config Event Specific Interrupt
  * @config_scsi_dev: called to configure SCSI device parameters
  * @freq_to_gear_speed: called to map clock frequency to the max supported gear speed
+ * @apply_tx_eqtr_settings: called to apply TX Equalization Training
+ *	settings.
+ * @get_rx_fom: called to get Figure of Merit (FOM) value.
+ * @tx_eqtr_notify: called before and after TX Equalization Training procedure
+ *	to allow platform vendor specific configs to take place.
  */
 struct ufs_hba_variant_ops {
 	const char *name;
@@ -347,10 +431,12 @@ struct ufs_hba_variant_ops {
 				     enum ufs_notify_change_status);
 	int	(*link_startup_notify)(struct ufs_hba *,
 				       enum ufs_notify_change_status);
-	int	(*pwr_change_notify)(struct ufs_hba *,
-			enum ufs_notify_change_status status,
-			const struct ufs_pa_layer_attr *desired_pwr_mode,
-			struct ufs_pa_layer_attr *final_params);
+	int	(*negotiate_pwr_mode)(struct ufs_hba *hba,
+				      const struct ufs_pa_layer_attr *desired_pwr_mode,
+				      struct ufs_pa_layer_attr *final_params);
+	int	(*pwr_change_notify)(struct ufs_hba *hba,
+				     enum ufs_notify_change_status status,
+				     struct ufs_pa_layer_attr *final_params);
 	void	(*setup_xfer_req)(struct ufs_hba *hba, int tag,
 				  bool is_scsi_cmd);
 	void	(*setup_task_mgmt)(struct ufs_hba *, int, u8);
@@ -380,6 +466,17 @@ struct ufs_hba_variant_ops {
 	int	(*config_esi)(struct ufs_hba *hba);
 	void	(*config_scsi_dev)(struct scsi_device *sdev);
 	u32	(*freq_to_gear_speed)(struct ufs_hba *hba, unsigned long freq);
+	int	(*get_rx_fom)(struct ufs_hba *hba,
+			      struct ufs_pa_layer_attr *pwr_mode,
+			      struct tx_eqtr_iter *h_iter,
+			      struct tx_eqtr_iter *d_iter);
+	int	(*apply_tx_eqtr_settings)(struct ufs_hba *hba,
+					  struct ufs_pa_layer_attr *pwr_mode,
+					  struct tx_eqtr_iter *h_iter,
+					  struct tx_eqtr_iter *d_iter);
+	int	(*tx_eqtr_notify)(struct ufs_hba *hba,
+				  enum ufs_notify_change_status status,
+				  struct ufs_pa_layer_attr *pwr_mode);
 };
 
 /* clock gating state  */
@@ -528,6 +625,17 @@ enum ufshcd_state {
 	UFSHCD_STATE_ERROR,
 };
 
+/**
+ * enum ufshcd_pmc_policy - Power Mode change policy
+ * @UFSHCD_PMC_POLICY_DONT_FORCE: Do not force a Power Mode change.
+ * @UFSHCD_PMC_POLICY_FORCE: Force a Power Mode change even if current Power
+ *	Mode is same as target Power Mode.
+ */
+enum ufshcd_pmc_policy {
+	UFSHCD_PMC_POLICY_DONT_FORCE,
+	UFSHCD_PMC_POLICY_FORCE,
+};
+
 enum ufshcd_quirks {
 	/* Interrupt aggregation support is broken */
 	UFSHCD_QUIRK_BROKEN_INTR_AGGR			= 1 << 0,
@@ -690,6 +798,12 @@ enum ufshcd_quirks {
 	 * because it causes link startup to become unreliable.
 	 */
 	UFSHCD_QUIRK_PERFORM_LINK_STARTUP_ONCE		= 1 << 26,
+
+	/*
+	 * On some platforms, the VCC regulator has a slow ramp-up time. Add a
+	 * delay after enabling VCC to ensure it's stable.
+	 */
+	UFSHCD_QUIRK_VCC_ON_DELAY			= 1 << 27,
 };
 
 enum ufshcd_caps {
@@ -767,6 +881,13 @@ enum ufshcd_caps {
 	 * WriteBooster when scaling the clock down.
 	 */
 	UFSHCD_CAP_WB_WITH_CLK_SCALING			= 1 << 12,
+
+	/*
+	 * This capability allows the host controller driver to apply TX
+	 * Equalization settings discovered from UFS attributes, variant
+	 * specific operations and TX Equalization Training procedure.
+	 */
+	UFSHCD_CAP_TX_EQUALIZATION			= 1 << 13,
 };
 
 struct ufs_hba_variant_params {
@@ -881,7 +1002,6 @@ enum ufshcd_mcq_opr {
  * @saved_uic_err: sticky UIC error mask
  * @ufs_stats: various error counters
  * @force_reset: flag to force eh_work perform a full reset
- * @force_pmc: flag to force a power mode change
  * @silence_err_logs: flag to silence error logs
  * @dev_cmd: ufs device management command information
  * @last_dme_cmd_tstamp: time stamp of the last completed DME command
@@ -943,7 +1063,21 @@ enum ufshcd_mcq_opr {
  * @critical_health_count: count of critical health exceptions
  * @dev_lvl_exception_count: count of device level exceptions since last reset
  * @dev_lvl_exception_id: vendor specific information about the device level exception event.
+ * @dme_qos_notification: Bitfield of pending DME Quality of Service (QoS)
+ *	events. Bits[3:1] reflect the corresponding bits of UIC DME Error Code
+ *	field within the Host Controller's UECDME register. Bit[0] is a flag
+ *	indicating that the DME QoS Monitor has been reset by the host.
+ * @dme_qos_sysfs_handle: handle for 'dme_qos_notification' sysfs entry
  * @rpmbs: list of OP-TEE RPMB devices (one per RPMB region)
+ * @host_preshoot_cap: a bitfield to indicate supported PreShoot dBs of host's TX lanes, cache of
+ *	host M-PHY TX_HS_PreShoot_Setting_Capability Attribute (ID 0x15)
+ * @host_deemphasis_cap: a bitfield to indicate supported DeEmphasis dBs of host's TX lanes, cache
+ *	of host M-PHY TX_HS_DeEmphasis_Setting_Capability Attribute (ID 0x12)
+ * @device_preshoot_cap: a bitfield to indicate supported PreShoot dBs of device's TX lanes, cache
+ *	of device M-PHY TX_HS_PreShoot_Setting_Capability Attribute (ID 0x15)
+ * @device_deemphasis_cap: a bitfield to indicate supported DeEmphasis dBs of device's TX lanes,
+ *	cache of device M-PHY TX_HS_DeEmphasis_Setting_Capability Attribute (ID 0x12)
+ * @tx_eq_params: TX Equalization settings
  */
 struct ufs_hba {
 	void __iomem *mmio_base;
@@ -1035,7 +1169,6 @@ struct ufs_hba {
 	u32 saved_uic_err;
 	struct ufs_stats ufs_stats;
 	bool force_reset;
-	bool force_pmc;
 	bool silence_err_logs;
 
 	/* Device management request data */
@@ -1116,8 +1249,18 @@ struct ufs_hba {
 	int critical_health_count;
 	atomic_t dev_lvl_exception_count;
 	u64 dev_lvl_exception_id;
+
+	atomic_t dme_qos_notification;
+	struct kernfs_node *dme_qos_sysfs_handle;
+
 	u32 vcc_off_delay_us;
 	struct list_head rpmbs;
+
+	u8 host_preshoot_cap;
+	u8 host_deemphasis_cap;
+	u8 device_preshoot_cap;
+	u8 device_deemphasis_cap;
+	struct ufshcd_tx_eq_params tx_eq_params[UFS_HS_GEAR_MAX];
 };
 
 /**
@@ -1262,6 +1405,13 @@ static inline bool ufshcd_enable_wb_if_scaling_up(struct ufs_hba *hba)
 	return hba->caps & UFSHCD_CAP_WB_WITH_CLK_SCALING;
 }
 
+static inline bool ufshcd_is_tx_eq_supported(struct ufs_hba *hba)
+{
+	return hba->caps & UFSHCD_CAP_TX_EQUALIZATION &&
+	       hba->ufs_version >= ufshci_version(5, 0) &&
+	       hba->dev_info.wspecversion >= 0x500;
+}
+
 #define ufsmcq_writel(hba, val, reg)	\
 	writel((val), (hba)->mcq_base + (reg))
 #define ufsmcq_readl(hba, reg)	\
@@ -1277,6 +1427,18 @@ static inline bool ufshcd_enable_wb_if_scaling_up(struct ufs_hba *hba)
 #define ufshcd_readl(hba, reg)	\
 	readl((hba)->mmio_base + (reg))
 
+static inline const char *ufs_hs_rate_to_str(enum ufs_hs_gear_rate rate)
+{
+	switch (rate) {
+	case PA_HS_MODE_A:
+		return "A";
+	case PA_HS_MODE_B:
+		return "B";
+	default:
+		return "Unknown";
+	}
+}
+
 /**
  * ufshcd_rmwl - perform read/modify/write for a controller register
  * @hba: per adapter instance
@@ -1361,9 +1523,16 @@ extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
 			       u8 attr_set, u32 mib_val, u8 peer);
 extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
 			       u32 *mib_val, u8 peer);
+extern int ufshcd_change_power_mode(struct ufs_hba *hba,
+				    struct ufs_pa_layer_attr *pwr_mode,
+				    enum ufshcd_pmc_policy pmc_policy);
 extern int ufshcd_config_pwr_mode(struct ufs_hba *hba,
-			struct ufs_pa_layer_attr *desired_pwr_mode);
+				  struct ufs_pa_layer_attr *desired_pwr_mode,
+				  enum ufshcd_pmc_policy pmc_policy);
 extern int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode);
+extern int ufshcd_apply_tx_eq_settings(struct ufs_hba *hba,
+				       struct ufshcd_tx_eq_params *params,
+				       u32 gear);
 
 /* UIC command interfaces for DME primitives */
 #define DME_LOCAL	0
diff --git a/include/ufs/ufshci.h b/include/ufs/ufshci.h
index 806fdaf52bd9..9f0fdd850e54 100644
--- a/include/ufs/ufshci.h
+++ b/include/ufs/ufshci.h
@@ -115,6 +115,7 @@ enum {
 enum {
 	REG_CQIS		= 0x0,
 	REG_CQIE		= 0x4,
+	REG_MCQIACR		= 0x8,
 };
 
 enum {
@@ -188,6 +189,7 @@ static inline u32 ufshci_version(u32 major, u32 minor)
 #define SYSTEM_BUS_FATAL_ERROR			0x20000
 #define CRYPTO_ENGINE_FATAL_ERROR		0x40000
 #define MCQ_CQ_EVENT_STATUS			0x100000
+#define MCQ_IAG_EVENT_STATUS			0x200000
 
 #define UFSHCD_UIC_HIBERN8_MASK	(UIC_HIBERNATE_ENTER |\
 				UIC_HIBERNATE_EXIT)
@@ -271,6 +273,7 @@ enum {
 /* UECDME - Host UIC Error Code DME 48h */
 #define UIC_DME_ERROR			0x80000000
 #define UIC_DME_ERROR_CODE_MASK		0x1
+#define UIC_DME_QOS_MASK		0xE
 
 /* UTRIACR - Interrupt Aggregation control register - 0x4Ch */
 #define INT_AGGR_TIMEOUT_VAL_MASK		0xFF
diff --git a/include/ufs/unipro.h b/include/ufs/unipro.h
index 59de737490ca..f849a2a101ae 100644
--- a/include/ufs/unipro.h
+++ b/include/ufs/unipro.h
@@ -10,6 +10,8 @@
  * M-TX Configuration Attributes
  */
 #define TX_HIBERN8TIME_CAPABILITY		0x000F
+#define TX_HS_DEEMPHASIS_SETTING_CAP		0x0012
+#define TX_HS_PRESHOOT_SETTING_CAP		0x0015
 #define TX_MODE					0x0021
 #define TX_HSRATE_SERIES			0x0022
 #define TX_HSGEAR				0x0023
@@ -30,6 +32,8 @@
 #define TX_LCC_SEQUENCER			0x0032
 #define TX_MIN_ACTIVATETIME			0x0033
 #define TX_PWM_G6_G7_SYNC_LENGTH		0x0034
+#define TX_HS_DEEMPHASIS_SETTING		0x0037
+#define TX_HS_PRESHOOT_SETTING			0x003B
 #define TX_REFCLKFREQ				0x00EB
 #define TX_CFGCLKFREQVAL			0x00EC
 #define	CFGEXTRATTR				0x00F0
@@ -38,6 +42,9 @@
 /*
  * M-RX Configuration Attributes
  */
+#define RX_HS_G5_ADAPT_INITIAL_CAP		0x0074
+#define RX_HS_G6_ADAPT_INITIAL_CAP		0x007B
+#define RX_HS_G6_ADAPT_INITIAL_L0L1L2L3_CAP	0x007D
 #define RX_HS_G1_SYNC_LENGTH_CAP		0x008B
 #define RX_HS_G1_PREP_LENGTH_CAP		0x008C
 #define RX_MIN_ACTIVATETIME_CAPABILITY		0x008F
@@ -50,6 +57,7 @@
 #define RX_HIBERN8TIME_CAP			0x0092
 #define RX_ADV_HIBERN8TIME_CAP			0x0099
 #define RX_ADV_MIN_ACTIVATETIME_CAP		0x009A
+#define RX_HS_G4_ADAPT_INITIAL_CAP		0x009F
 #define RX_MODE					0x00A1
 #define RX_HSRATE_SERIES			0x00A2
 #define RX_HSGEAR				0x00A3
@@ -64,15 +72,32 @@
 #define CFGRXCDR8				0x00BA
 #define CFGRXOVR8				0x00BD
 #define CFGRXOVR6				0x00BF
+#define RX_FOM					0x00C2
 #define RXDIRECTCTRL2				0x00C7
 #define CFGRXOVR4				0x00E9
 #define RX_REFCLKFREQ				0x00EB
 #define	RX_CFGCLKFREQVAL			0x00EC
 #define CFGWIDEINLN				0x00F0
+#define RX_EYEMON_CAP				0x00F1
+#define RX_EYEMON_TIMING_MAX_STEPS_CAP		0x00F2
+#define RX_EYEMON_TIMING_MAX_OFFSET_CAP		0x00F3
+#define RX_EYEMON_VOLTAGE_MAX_STEPS_CAP		0x00F4
+#define RX_EYEMON_VOLTAGE_MAX_OFFSET_CAP	0x00F5
+#define RX_EYEMON_ENABLE			0x00F6
+#define RX_EYEMON_TIMING_STEPS			0x00F7
+#define RX_EYEMON_VOLTAGE_STEPS			0x00F8
+#define RX_EYEMON_TARGET_TEST_COUNT		0x00F9
+#define RX_EYEMON_TESTED_COUNT			0x00FA
+#define RX_EYEMON_ERROR_COUNT			0x00FB
+#define RX_EYEMON_START				0x00FC
+#define RX_EYEMON_EXTENDED_ERROR_COUNT		0x00FD
+
 #define ENARXDIRECTCFG4				0x00F2
 #define ENARXDIRECTCFG3				0x00F3
 #define ENARXDIRECTCFG2				0x00F4
 
+#define RX_EYEMON_NEGATIVE_STEP_BIT		BIT(6)
+#define RX_EYEMON_EXTENDED_VRANGE_BIT		BIT(6)
 
 #define is_mphy_tx_attr(attr)			(attr < RX_MODE)
 #define RX_ADV_FINE_GRAN_STEP(x)		((((x) & 0x3) << 1) | 0x1)
@@ -99,6 +124,18 @@
 
 #define UNIPRO_CB_OFFSET(x)			(0x8000 | x)
 
+#define ADAPT_LENGTH_MASK			0x7F
+#define ADAPT_RANGE_BIT				BIT(7)
+#define IS_ADAPT_RANGE_COARSE(x)		((x) & ADAPT_RANGE_BIT)
+
+/* Adapt definitions */
+#define ADAPT_LENGTH_MAX			0x91
+#define ADAPT_L0L3_LENGTH_MAX			0x90
+#define ADAPT_L0L1L2L3_LENGTH_MAX		0x8C
+#define TADAPT_FACTOR				650
+#define TADAPT_L0L3_FACTOR			(1 << 9)
+#define TADAPT_L0L1L2L3_FACTOR			(1 << 15)
+
 /*
  * PHY Adapter attributes
  */
@@ -164,10 +201,26 @@
 #define PA_PACPERRORCOUNT	0x15C1
 #define PA_PHYTESTCONTROL	0x15C2
 #define PA_TXHSG4SYNCLENGTH	0x15D0
+#define PA_PEERRXHSG4ADAPTINITIAL		0x15D3
 #define PA_TXHSADAPTTYPE	0x15D4
 #define PA_TXHSG5SYNCLENGTH	0x15D6
+#define PA_PEERRXHSG5ADAPTINITIAL		0x15D9
+#define PA_PEERRXHSG6ADAPTREFRESHL0L1L2L3	0x15DE
+#define PA_PEERRXHSG6ADAPTINITIALL0L3		0x15DF
+#define PA_PEERRXHSG6ADAPTINITIALL0L1L2L3	0x15E0
+#define PA_TXEQG1SETTING			0x15E1
+#define PA_TXEQG2SETTING			0x15E2
+#define PA_TXEQG3SETTING			0x15E3
+#define PA_TXEQG4SETTING			0x15E4
+#define PA_TXEQG5SETTING			0x15E5
+#define PA_TXEQG6SETTING			0x15E6
+#define PA_TXEQTRSETTING			0x15E7
+#define PA_PEERTXEQTRSETTING			0x15E8
+#define PA_PRECODEEN				0x15E9
+#define PA_EQTR_GEAR				0x15EA
+#define PA_TXADAPTLENGTH_EQTR			0x15EB
 
-/* Adpat type for PA_TXHSADAPTTYPE attribute */
+/* Adapt type for PA_TXHSADAPTTYPE attribute */
 #define PA_REFRESH_ADAPT       0x00
 #define PA_INITIAL_ADAPT       0x01
 #define PA_NO_ADAPT            0x03
@@ -187,6 +240,88 @@
 /* PHY Adapter Protocol Constants */
 #define PA_MAXDATALANES	4
 
+/*
+ * TX EQTR's minimum TAdapt should not be less than 10us.
+ * This value is rounded up to the nearest number of Unit Intervals (UI).
+ */
+#define TX_EQTR_HS_G4_MIN_T_ADAPT		166400
+#define TX_EQTR_HS_G5_MIN_T_ADAPT		332800
+#define TX_EQTR_HS_G6_MIN_T_ADAPT		262144
+
+#define TX_EQTR_HS_G4_ADAPT_DEFAULT		0x88
+#define TX_EQTR_HS_G5_ADAPT_DEFAULT		0x89
+#define TX_EQTR_HS_G6_ADAPT_DEFAULT		0x89
+
+#define TX_EQTR_CAP_MASK			0x7F
+
+#define TX_EQTR_ADAPT_LENGTH_L0L1L2L3_SHIFT	8
+#define TX_EQTR_ADAPT_RESERVED			0xFF
+
+#define TX_HS_NUM_PRESHOOT			8
+#define TX_HS_NUM_DEEMPHASIS			8
+#define TX_HS_PRESHOOT_SHIFT			4
+#define TX_HS_DEEMPHASIS_SHIFT			4
+#define TX_HS_PRESHOOT_OFFSET			0
+#define TX_HS_DEEMPHASIS_OFFSET			16
+
+#define TX_HS_PRESHOOT_LANE_SHIFT(lane) \
+	(TX_HS_PRESHOOT_OFFSET + (lane) * TX_HS_PRESHOOT_SHIFT)
+#define TX_HS_DEEMPHASIS_LANE_SHIFT(lane) \
+	(TX_HS_DEEMPHASIS_OFFSET + (lane) * TX_HS_DEEMPHASIS_SHIFT)
+
+#define TX_HS_PRESHOOT_BITS(lane, val) \
+	((val) << TX_HS_PRESHOOT_LANE_SHIFT(lane))
+#define TX_HS_DEEMPHASIS_BITS(lane, val) \
+	((val) << TX_HS_DEEMPHASIS_LANE_SHIFT(lane))
+
+#define RX_FOM_VALUE_MASK			0x7F
+#define RX_FOM_PRECODING_EN_BIT			BIT(7)
+
+#define PRECODEEN_TX_OFFSET			0
+#define PRECODEEN_RX_OFFSET			4
+#define PRECODEEN_TX_BIT(lane)		(1 << (PRECODEEN_TX_OFFSET + (lane)))
+#define PRECODEEN_RX_BIT(lane)		(1 << (PRECODEEN_RX_OFFSET + (lane)))
+
+enum ufs_tx_eq_preset {
+	UFS_TX_EQ_PRESET_P0,
+	UFS_TX_EQ_PRESET_P1,
+	UFS_TX_EQ_PRESET_P2,
+	UFS_TX_EQ_PRESET_P3,
+	UFS_TX_EQ_PRESET_P4,
+	UFS_TX_EQ_PRESET_P5,
+	UFS_TX_EQ_PRESET_P6,
+	UFS_TX_EQ_PRESET_P7,
+	UFS_TX_EQ_PRESET_MAX,
+};
+
+enum ufs_tx_hs_preshoot {
+	UFS_TX_HS_PRESHOOT_DB_0P0,
+	UFS_TX_HS_PRESHOOT_DB_0P4,
+	UFS_TX_HS_PRESHOOT_DB_0P8,
+	UFS_TX_HS_PRESHOOT_DB_1P2,
+	UFS_TX_HS_PRESHOOT_DB_1P6,
+	UFS_TX_HS_PRESHOOT_DB_2P5,
+	UFS_TX_HS_PRESHOOT_DB_3P5,
+	UFS_TX_HS_PRESHOOT_DB_4P7,
+};
+
+enum ufs_tx_hs_deemphasis {
+	UFS_TX_HS_DEEMPHASIS_DB_0P0,
+	UFS_TX_HS_DEEMPHASIS_DB_0P8,
+	UFS_TX_HS_DEEMPHASIS_DB_1P6,
+	UFS_TX_HS_DEEMPHASIS_DB_2P5,
+	UFS_TX_HS_DEEMPHASIS_DB_3P5,
+	UFS_TX_HS_DEEMPHASIS_DB_4P7,
+	UFS_TX_HS_DEEMPHASIS_DB_6P0,
+	UFS_TX_HS_DEEMPHASIS_DB_7P6,
+};
+
+enum ufs_eom_eye_mask {
+	UFS_EOM_EYE_MASK_M,
+	UFS_EOM_EYE_MASK_L,
+	UFS_EOM_EYE_MASK_U,
+};
+
 #define DL_FC0ProtectionTimeOutVal_Default	8191
 #define DL_TC0ReplayTimeOutVal_Default		65535
 #define DL_AFC0ReqTimeOutVal_Default		32767
@@ -233,7 +368,9 @@ enum ufs_hs_gear_tag {
 	UFS_HS_G2,		/* HS Gear 2 */
 	UFS_HS_G3,		/* HS Gear 3 */
 	UFS_HS_G4,		/* HS Gear 4 */
-	UFS_HS_G5		/* HS Gear 5 */
+	UFS_HS_G5,		/* HS Gear 5 */
+	UFS_HS_G6,		/* HS Gear 6 */
+	UFS_HS_GEAR_MAX = UFS_HS_G6,
 };
 
 enum ufs_lanes {
