Dataset Viewer

Columns: text (string, lengths 213 to 7.14k) | idx (int64, values 16 to 12.5k)

Each row pairs a kernel function diff (--- initial / +++ final) with, after the <sep> marker, the Coccinelle semantic patch that produced it; the row's idx follows each entry in brackets.
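Every diff that follows applies the same refactoring: an open-coded kmalloc() + copy_from_user() pair, with its two separate failure exits, is collapsed into one memdup_user() call whose result is checked with IS_ERR()/PTR_ERR(). The semantic patch after each <sep> marker is the Coccinelle rule performing the rewrite (runnable with spatch, e.g. spatch --sp-file rule.cocci file.c). As a minimal sketch of the pattern, assuming a made-up example_set_opt() that appears in none of the rows below:

#include <linux/slab.h>    /* kmalloc, kfree */
#include <linux/string.h>  /* memdup_user */
#include <linux/uaccess.h> /* copy_from_user */
#include <linux/err.h>     /* IS_ERR, PTR_ERR */

static int example_set_opt(void __user *optval, unsigned int optlen)
{
  char *buf;

  /* Before: allocate, then copy, each with its own error exit.
   *
   *   buf = kmalloc(optlen, GFP_KERNEL);
   *   if (!buf)
   *     return -ENOMEM;
   *   if (copy_from_user(buf, optval, optlen)) {
   *     kfree(buf);
   *     return -EFAULT;
   *   }
   */

  /* After: memdup_user() does both steps and returns either the
   * filled buffer or ERR_PTR(-ENOMEM) / ERR_PTR(-EFAULT). */
  buf = memdup_user(optval, optlen);
  if (IS_ERR(buf))
    return PTR_ERR(buf);

  /* ... use buf ... */
  kfree(buf);
  return 0;
}

One caveat the rows illustrate: PTR_ERR() is only correct when the enclosing function returns an integer. A pointer-returning function (strndup_user, copy_dev_ioctl below) must propagate the ERR_PTR value itself, and any kfree() reachable from the error path must be moved off it (ep_write below), because kfree() on an ERR_PTR is invalid.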
--- initial
+++ final
@@ -1,86 +1,82 @@
 static int raw_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) {
   struct sock *sk = sock->sk;
   struct raw_sock *ro = raw_sk(sk);
   struct can_filter *filter = NULL; /* dyn. alloc'ed filters */
   struct can_filter sfilter;        /* single filter */
   struct net_device *dev = NULL;
   can_err_mask_t err_mask = 0;
   int count = 0;
   int err = 0;
   if (level != SOL_CAN_RAW) return -EINVAL;
   switch (optname) {
   case CAN_RAW_FILTER:
     if (optlen % sizeof(struct can_filter) != 0) return -EINVAL;
     count = optlen / sizeof(struct can_filter);
     if (count > 1) {
       /* filter does not fit into dfilter => alloc space */
-      filter = kmalloc(optlen, GFP_KERNEL);
-      if (!filter) return -ENOMEM;
-      if (copy_from_user(filter, optval, optlen)) {
-        kfree(filter);
-        return -EFAULT;
-      }
+      filter = memdup_user(optval, optlen);
+      if (IS_ERR(filter)) return PTR_ERR(filter);
     } else if (count == 1) {
       if (copy_from_user(&sfilter, optval, sizeof(sfilter))) return -EFAULT;
     }
     lock_sock(sk);
     if (ro->bound && ro->ifindex) dev = dev_get_by_index(&init_net, ro->ifindex);
     if (ro->bound) {
       /* (try to) register the new filters */
       if (count == 1)
         err = raw_enable_filters(dev, sk, &sfilter, 1);
       else
         err = raw_enable_filters(dev, sk, filter, count);
       if (err) {
         if (count > 1) kfree(filter);
         goto out_fil;
       }
       /* remove old filter registrations */
       raw_disable_filters(dev, sk, ro->filter, ro->count);
     }
     /* remove old filter space */
     if (ro->count > 1) kfree(ro->filter);
     /* link new filters to the socket */
     if (count == 1) {
       /* copy filter data for single filter */
       ro->dfilter = sfilter;
       filter = &ro->dfilter;
     }
     ro->filter = filter;
     ro->count = count;
   out_fil:
     if (dev) dev_put(dev);
     release_sock(sk);
     break;
   case CAN_RAW_ERR_FILTER:
     if (optlen != sizeof(err_mask)) return -EINVAL;
     if (copy_from_user(&err_mask, optval, optlen)) return -EFAULT;
     err_mask &= CAN_ERR_MASK;
     lock_sock(sk);
     if (ro->bound && ro->ifindex) dev = dev_get_by_index(&init_net, ro->ifindex);
     /* remove current error mask */
     if (ro->bound) {
       /* (try to) register the new err_mask */
       err = raw_enable_errfilter(dev, sk, err_mask);
       if (err) goto out_err;
       /* remove old err_mask registration */
       raw_disable_errfilter(dev, sk, ro->err_mask);
     }
     /* link new err_mask to the socket */
     ro->err_mask = err_mask;
   out_err:
     if (dev) dev_put(dev);
     release_sock(sk);
     break;
   case CAN_RAW_LOOPBACK:
     if (optlen != sizeof(ro->loopback)) return -EINVAL;
     if (copy_from_user(&ro->loopback, optval, optlen)) return -EFAULT;
     break;
   case CAN_RAW_RECV_OWN_MSGS:
     if (optlen != sizeof(ro->recv_own_msgs)) return -EINVAL;
     if (copy_from_user(&ro->recv_own_msgs, optval, optlen)) return -EFAULT;
     break;
   default: return -ENOPROTOOPT;
   }
   return err;
 }<sep>@@
expression e1,e2,e3,r;
statement S;
constant GFP_KERNEL;
@@
-	e1 = kmalloc(e3, GFP_KERNEL);
-	if (e1 == NULL)
-		S
-	if (copy_from_user(e1, e2, e3))
+	e1 = memdup_user(e2, e3);
+	if (IS_ERR(e1))
-       {
-		kfree(e1);
-		return r;
+		return PTR_ERR(e1);
-	}
[idx: 98]

--- initial
+++ final
@@ -1,24 +1,20 @@
 static int get_filter(void __user *arg, struct sock_filter **p) {
   struct sock_fprog uprog;
   struct sock_filter *code = NULL;
   int len, err;
   if (copy_from_user(&uprog, arg, sizeof(uprog))) return -EFAULT;
   if (!uprog.len) {
     *p = NULL;
     return 0;
   }
   len = uprog.len * sizeof(struct sock_filter);
-  code = kmalloc(len, GFP_KERNEL);
-  if (code == NULL) return -ENOMEM;
-  if (copy_from_user(code, uprog.filter, len)) {
-    kfree(code);
-    return -EFAULT;
-  }
+  code = memdup_user(uprog.filter, len);
+  if (IS_ERR(code)) return PTR_ERR(code);
   err = sk_chk_filter(code, uprog.len);
   if (err) {
     kfree(code);
     return err;
   }
   *p = code;
   return uprog.len;
 }<sep>@@
expression e1,e2,e3,r;
statement S;
constant GFP_KERNEL;
@@
-	e1 = kmalloc(e3, GFP_KERNEL);
-	if (e1 == NULL)
-		S
-	if (copy_from_user(e1, e2, e3))
+	e1 = memdup_user(e2, e3);
+	if (IS_ERR(e1))
-       {
-		kfree(e1);
-		return r;
+		return PTR_ERR(e1);
-	}
[idx: 99]

--- initial
+++ final
@@ -1,26 +1,22 @@
 static int sdla_xfer(struct net_device *dev, struct sdla_mem __user *info, int read) {
   struct sdla_mem mem;
   char *temp;
   if (copy_from_user(&mem, info, sizeof(mem))) return -EFAULT;
 
   if (read) {
     temp = kzalloc(mem.len, GFP_KERNEL);
     if (!temp) return (-ENOMEM);
     sdla_read(dev, mem.addr, temp, mem.len);
     if (copy_to_user(mem.data, temp, mem.len)) {
       kfree(temp);
       return -EFAULT;
     }
     kfree(temp);
   } else {
-    temp = kmalloc(mem.len, GFP_KERNEL);
-    if (!temp) return (-ENOMEM);
-    if (copy_from_user(temp, mem.data, mem.len)) {
-      kfree(temp);
-      return -EFAULT;
-    }
+    temp = memdup_user(mem.data, mem.len);
+    if (IS_ERR(temp)) return PTR_ERR(temp);
     sdla_write(dev, mem.addr, temp, mem.len);
     kfree(temp);
   }
   return (0);
 }<sep>@@
expression e1,e2,e3,r;
statement S;
constant GFP_KERNEL;
@@
-	e1 = kmalloc(e3, GFP_KERNEL);
-	if (e1 == NULL)
-		S
-	if (copy_from_user(e1, e2, e3))
+	e1 = memdup_user(e2, e3);
+	if (IS_ERR(e1))
-       {
-		kfree(e1);
-		return r;
+		return PTR_ERR(e1);
-	}
[idx: 101]

--- initial
+++ final
@@ -1,17 +1,13 @@
 static int dccp_setsockopt_ccid(struct sock *sk, int type, char __user *optval, unsigned int optlen) {
   u8 *val;
   int rc = 0;
   if (optlen < 1 || optlen > DCCP_FEAT_MAX_SP_VALS) return -EINVAL;
-  val = kmalloc(optlen, GFP_KERNEL);
-  if (val == NULL) return -ENOMEM;
-  if (copy_from_user(val, optval, optlen)) {
-    kfree(val);
-    return -EFAULT;
-  }
+  val = memdup_user(optval, optlen);
+  if (IS_ERR(val)) return PTR_ERR(val);
   lock_sock(sk);
   if (type == DCCP_SOCKOPT_TX_CCID || type == DCCP_SOCKOPT_CCID) rc = dccp_feat_register_sp(sk, DCCPF_CCID, 1, val, optlen);
   if (!rc && (type == DCCP_SOCKOPT_RX_CCID || type == DCCP_SOCKOPT_CCID)) rc = dccp_feat_register_sp(sk, DCCPF_CCID, 0, val, optlen);
   release_sock(sk);
   kfree(val);
   return rc;
 }<sep>@@
expression e1,e2,e3,r;
statement S;
constant GFP_KERNEL;
@@
-	e1 = kmalloc(e3, GFP_KERNEL);
-	if (e1 == NULL)
-		S
-	if (copy_from_user(e1, e2, e3))
+	e1 = memdup_user(e2, e3);
+	if (IS_ERR(e1))
-       {
-		kfree(e1);
-		return r;
+		return PTR_ERR(e1);
-	}
[idx: 102]

--- initial
+++ final
@@ -1,100 +1,96 @@
 static int fst_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) {
   struct fst_card_info *card;
   struct fst_port_info *port;
   struct fstioc_write wrthdr;
   struct fstioc_info info;
   unsigned long flags;
   void *buf;
   dbg(DBG_IOCTL, "ioctl: %x, %p\n", cmd, ifr->ifr_data);
   port = dev_to_port(dev);
   card = port->card;
   if (!capable(CAP_NET_ADMIN)) return -EPERM;
   switch (cmd) {
   case FSTCPURESET:
     fst_cpureset(card);
     card->state = FST_RESET;
     return 0;
   case FSTCPURELEASE:
     fst_cpurelease(card);
     card->state = FST_STARTING;
     return 0;
   case FSTWRITE: /* Code write (download) */
     /* First copy in the header with the length and offset of data
      * to write
      */
     if (ifr->ifr_data == NULL) { return -EINVAL; }
     if (copy_from_user(&wrthdr, ifr->ifr_data, sizeof(struct fstioc_write))) { return -EFAULT; }
     /* Sanity check the parameters. We don't support partial writes
      * when going over the top
      */
     if (wrthdr.size > FST_MEMSIZE || wrthdr.offset > FST_MEMSIZE || wrthdr.size + wrthdr.offset > FST_MEMSIZE) { return -ENXIO; }
     /* Now copy the data to the card. */
-    buf = kmalloc(wrthdr.size, GFP_KERNEL);
-    if (!buf) return -ENOMEM;
-    if (copy_from_user(buf, ifr->ifr_data + sizeof(struct fstioc_write), wrthdr.size)) {
-      kfree(buf);
-      return -EFAULT;
-    }
+    buf = memdup_user(ifr->ifr_data + sizeof(struct fstioc_write), wrthdr.size);
+    if (IS_ERR(buf)) return PTR_ERR(buf);
     memcpy_toio(card->mem + wrthdr.offset, buf, wrthdr.size);
     kfree(buf);
     /* Writes to the memory of a card in the reset state constitute
      * a download
      */
     if (card->state == FST_RESET) { card->state = FST_DOWNLOAD; }
     return 0;
   case FSTGETCONF:
     /* If card has just been started check the shared memory config
      * version and marker
      */
     if (card->state == FST_STARTING) {
       check_started_ok(card);
       /* If everything checked out enable card interrupts */
       if (card->state == FST_RUNNING) {
         spin_lock_irqsave(&card->card_lock, flags);
         fst_enable_intr(card);
         FST_WRB(card, interruptHandshake, 0xEE);
         spin_unlock_irqrestore(&card->card_lock, flags);
       }
     }
     if (ifr->ifr_data == NULL) { return -EINVAL; }
     gather_conf_info(card, port, &info);
     if (copy_to_user(ifr->ifr_data, &info, sizeof(info))) { return -EFAULT; }
     return 0;
   case FSTSETCONF:
     /*
      * Most of the settings have been moved to the generic ioctls
      * this just covers debug and board ident now
      */
     if (card->state != FST_RUNNING) {
       printk_err("Attempt to configure card %d in non-running state (%d)\n", card->card_no, card->state);
       return -EIO;
     }
     if (copy_from_user(&info, ifr->ifr_data, sizeof(info))) { return -EFAULT; }
     return set_conf_from_info(card, port, &info);
   case SIOCWANDEV:
     switch (ifr->ifr_settings.type) {
     case IF_GET_IFACE: return fst_get_iface(card, port, ifr);
     case IF_IFACE_SYNC_SERIAL:
     case IF_IFACE_V35:
     case IF_IFACE_V24:
     case IF_IFACE_X21:
     case IF_IFACE_X21D:
     case IF_IFACE_T1:
     case IF_IFACE_E1: return fst_set_iface(card, port, ifr);
     case IF_PROTO_RAW: port->mode = FST_RAW; return 0;
     case IF_GET_PROTO:
       if (port->mode == FST_RAW) {
         ifr->ifr_settings.type = IF_PROTO_RAW;
         return 0;
       }
       return hdlc_ioctl(dev, ifr, cmd);
     default:
       port->mode = FST_GEN_HDLC;
       dbg(DBG_IOCTL, "Passing this type to hdlc %x\n", ifr->ifr_settings.type);
       return hdlc_ioctl(dev, ifr, cmd);
     }
   default:
     /* Not one of ours. Pass through to HDLC package */
     return hdlc_ioctl(dev, ifr, cmd);
   }
 }<sep>@@
expression e1,e2,e3,r;
statement S;
constant GFP_KERNEL;
@@
-	e1 = kmalloc(e3, GFP_KERNEL);
-	if (e1 == NULL)
-		S
-	if (copy_from_user(e1, e2, e3))
+	e1 = memdup_user(e2, e3);
+	if (IS_ERR(e1))
-       {
-		kfree(e1);
-		return r;
+		return PTR_ERR(e1);
-	}
[idx: 103]

--- initial
+++ final
@@ -1,58 +1,54 @@
 static int pcbit_writecmd(const u_char __user *buf, int len, int driver, int channel) {
   struct pcbit_dev *dev;
   int i, j;
   const u_char *loadbuf;
   u_char *ptr = NULL;
   u_char *cbuf;
   int errstat;
   dev = finddev(driver);
   if (!dev) {
     printk("pcbit_writecmd: couldn't find device");
     return -ENODEV;
   }
   switch (dev->l2_state) {
   case L2_LWMODE:
     /* check (size <= rdp_size); write buf into board */
     if (len < 0 || len > BANK4 + 1 || len > 1024) {
       printk("pcbit_writecmd: invalid length %d\n", len);
       return -EINVAL;
     }
-    cbuf = kmalloc(len, GFP_KERNEL);
-    if (!cbuf) return -ENOMEM;
-    if (copy_from_user(cbuf, buf, len)) {
-      kfree(cbuf);
-      return -EFAULT;
-    }
+    cbuf = memdup_user(buf, len);
+    if (IS_ERR(cbuf)) return PTR_ERR(cbuf);
     memcpy_toio(dev->sh_mem, cbuf, len);
     kfree(cbuf);
     return len;
   case L2_FWMODE:
     /* this is the hard part */
     /* dumb board */
     /* get it into kernel space */
     if ((ptr = kmalloc(len, GFP_KERNEL)) == NULL) return -ENOMEM;
     if (copy_from_user(ptr, buf, len)) {
       kfree(ptr);
       return -EFAULT;
     }
     loadbuf = ptr;
 
     errstat = 0;
     for (i = 0; i < len; i++) {
       for (j = 0; j < LOAD_RETRY; j++)
         if (!(readb(dev->sh_mem + dev->loadptr))) break;
       if (j == LOAD_RETRY) {
         errstat = -ETIME;
         printk("TIMEOUT i=%d\n", i);
         break;
       }
       writeb(loadbuf[i], dev->sh_mem + dev->loadptr + 1);
       writeb(0x01, dev->sh_mem + dev->loadptr);
       dev->loadptr += 2;
       if (dev->loadptr > LOAD_ZONE_END) dev->loadptr = LOAD_ZONE_START;
     }
     kfree(ptr);
     return errstat ? errstat : len;
   default: return -EBUSY;
   }
 }<sep>@@
expression e1,e2,e3,r;
statement S;
constant GFP_KERNEL;
@@
-	e1 = kmalloc(e3, GFP_KERNEL);
-	if (e1 == NULL)
-		S
-	if (copy_from_user(e1, e2, e3))
+	e1 = memdup_user(e2, e3);
+	if (IS_ERR(e1))
-       {
-		kfree(e1);
-		return r;
+		return PTR_ERR(e1);
-	}
[idx: 104]

--- initial
+++ final
@@ -1,25 +1,21 @@
 static int get_filter(void __user *arg, struct sock_filter **p) {
   struct sock_fprog uprog;
   struct sock_filter *code = NULL;
   int len, err;
   if (copy_from_user(&uprog, arg, sizeof(uprog))) return -EFAULT;
   if (!uprog.len) {
     *p = NULL;
     return 0;
   }
   /* uprog.len is unsigned short, so no overflow here */
   len = uprog.len * sizeof(struct sock_filter);
-  code = kmalloc(len, GFP_KERNEL);
-  if (code == NULL) return -ENOMEM;
-  if (copy_from_user(code, uprog.filter, len)) {
-    kfree(code);
-    return -EFAULT;
-  }
+  code = memdup_user(uprog.filter, len);
+  if (IS_ERR(code)) return PTR_ERR(code);
   err = sk_chk_filter(code, uprog.len);
   if (err) {
     kfree(code);
     return err;
   }
   *p = code;
   return uprog.len;
 }<sep>@@
expression e1,e2,e3,r;
statement S;
constant GFP_KERNEL;
@@
-	e1 = kmalloc(e3, GFP_KERNEL);
-	if (e1 == NULL)
-		S
-	if (copy_from_user(e1, e2, e3))
+	e1 = memdup_user(e2, e3);
+	if (IS_ERR(e1))
-       {
-		kfree(e1);
-		return r;
+		return PTR_ERR(e1);
-	}
[idx: 106]

--- initial
+++ final
@@ -1,42 +1,37 @@
 static long dabusb_ioctl(struct file *file, unsigned int cmd, unsigned long arg) {
   pdabusb_t s = (pdabusb_t)file->private_data;
   pbulk_transfer_t pbulk;
   int ret = 0;
   int version = DABUSB_VERSION;
   dbg("dabusb_ioctl");
   lock_kernel();
   if (s->remove_pending) {
     unlock_kernel();
     return -EIO;
   }
   mutex_lock(&s->mutex);
   if (!s->usbdev) {
     mutex_unlock(&s->mutex);
     unlock_kernel();
     return -EIO;
   }
   switch (cmd) {
   case IOCTL_DAB_BULK:
-    pbulk = kmalloc(sizeof(bulk_transfer_t), GFP_KERNEL);
-    if (!pbulk) {
-      ret = -ENOMEM;
-      break;
-    }
-    if (copy_from_user(pbulk, (void __user *)arg, sizeof(bulk_transfer_t))) {
-      ret = -EFAULT;
-      kfree(pbulk);
+    pbulk = memdup_user((void __user *)arg, sizeof(bulk_transfer_t));
+    if (IS_ERR(pbulk)) {
+      ret = PTR_ERR(pbulk);
       break;
     }
     ret = dabusb_bulk(s, pbulk);
     if (ret == 0)
       if (copy_to_user((void __user *)arg, pbulk, sizeof(bulk_transfer_t))) ret = -EFAULT;
     kfree(pbulk);
     break;
   case IOCTL_DAB_OVERRUNS: ret = put_user(s->overruns, (unsigned int __user *)arg); break;
   case IOCTL_DAB_VERSION: ret = put_user(version, (unsigned int __user *)arg); break;
   default: ret = -ENOIOCTLCMD; break;
   }
   mutex_unlock(&s->mutex);
   unlock_kernel();
   return ret;
 }<sep>@@
expression e1,e2,e3,r;
statement S;
constant C;
constant GFP_KERNEL;
@@
-	e1 = kmalloc(e3, GFP_KERNEL);
-	if (e1 == NULL)
-		S
-	if (copy_from_user(e1, e2, e3))
+	e1 = memdup_user(e2, e3);
+	if (IS_ERR(e1))
        {
-		r = -C;
+		r = PTR_ERR(e1);
-		kfree(e1);
		...
	}
[idx: 107]

--- initial
+++ final
@@ -1,20 +1,16 @@
 static int dvbdmx_write(struct dmx_demux *demux, const char __user *buf, size_t count) {
   struct dvb_demux *dvbdemux = (struct dvb_demux *)demux;
   void *p;
   if ((!demux->frontend) || (demux->frontend->source != DMX_MEMORY_FE)) return -EINVAL;
-  p = kmalloc(count, GFP_USER);
-  if (!p) return -ENOMEM;
-  if (copy_from_user(p, buf, count)) {
-    kfree(p);
-    return -EFAULT;
-  }
+  p = memdup_user(buf, count);
+  if (IS_ERR(p)) return PTR_ERR(p);
   if (mutex_lock_interruptible(&dvbdemux->mutex)) {
     kfree(p);
     return -ERESTARTSYS;
   }
   dvb_dmx_swfilter(dvbdemux, p, count);
   kfree(p);
   mutex_unlock(&dvbdemux->mutex);
   if (signal_pending(current)) return -EINTR;
   return count;
 }<sep>@@
expression e1,e2,e3,r;
statement S;
constant GFP_KERNEL;
@@
-	e1 = kmalloc(e3, GFP_KERNEL);
-	if (e1 == NULL)
-		S
-	if (copy_from_user(e1, e2, e3))
+	e1 = memdup_user(e2, e3);
+	if (IS_ERR(e1))
-       {
-		kfree(e1);
-		return r;
+		return PTR_ERR(e1);
-	}
[idx: 108]

--- initial
+++ final
@@ -1,13 +1,9 @@
 static int ib_ucm_alloc_data(const void **dest, u64 src, u32 len) {
   void *data;
   *dest = NULL;
   if (!len) return 0;
-  data = kmalloc(len, GFP_KERNEL);
-  if (!data) return -ENOMEM;
-  if (copy_from_user(data, (void __user *)(unsigned long)src, len)) {
-    kfree(data);
-    return -EFAULT;
-  }
+  data = memdup_user((void __user *)(unsigned long)src, len);
+  if (IS_ERR(data)) return PTR_ERR(data);
   *dest = data;
   return 0;
 }<sep>@@
expression e1,e2,e3,r;
statement S;
constant GFP_KERNEL;
@@
-	e1 = kmalloc(e3, GFP_KERNEL);
-	if (e1 == NULL)
-		S
-	if (copy_from_user(e1, e2, e3))
+	e1 = memdup_user(e2, e3);
+	if (IS_ERR(e1))
-       {
-		kfree(e1);
-		return r;
+		return PTR_ERR(e1);
-	}
[idx: 109]

--- initial
+++ final
@@ -1,30 +1,26 @@
 static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd, uint64_t start, uint32_t length, void __user *ptr, uint32_t __user *retp) {
   struct mtd_oob_ops ops;
   uint32_t retlen;
   int ret = 0;
   if (!(file->f_mode & FMODE_WRITE)) return -EPERM;
   if (length > 4096) return -EINVAL;
   if (!mtd->write_oob)
     ret = -EOPNOTSUPP;
   else
     ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT;
   if (ret) return ret;
   ops.ooblen = length;
   ops.ooboffs = start & (mtd->oobsize - 1);
   ops.datbuf = NULL;
   ops.mode = MTD_OOB_PLACE;
   if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs)) return -EINVAL;
-  ops.oobbuf = kmalloc(length, GFP_KERNEL);
-  if (!ops.oobbuf) return -ENOMEM;
-  if (copy_from_user(ops.oobbuf, ptr, length)) {
-    kfree(ops.oobbuf);
-    return -EFAULT;
-  }
+  ops.oobbuf = memdup_user(ptr, length);
+  if (IS_ERR(ops.oobbuf)) return PTR_ERR(ops.oobbuf);
   start &= ~((uint64_t)mtd->oobsize - 1);
   ret = mtd->write_oob(mtd, start, &ops);
   if (ops.oobretlen > 0xFFFFFFFFU) ret = -EOVERFLOW;
   retlen = ops.oobretlen;
   if (copy_to_user(retp, &retlen, sizeof(length))) ret = -EFAULT;
   kfree(ops.oobbuf);
   return ret;
 }<sep>@@
expression e1,e2,e3,r;
statement S;
constant GFP_KERNEL;
@@
-	e1 = kmalloc(e3, GFP_KERNEL);
-	if (e1 == NULL)
-		S
-	if (copy_from_user(e1, e2, e3))
+	e1 = memdup_user(e2, e3);
+	if (IS_ERR(e1))
-       {
-		kfree(e1);
-		return r;
+		return PTR_ERR(e1);
-	}
[idx: 110]

--- initial
+++ final
@@ -1,54 +1,50 @@
 static int prism54_hostapd(struct net_device *ndev, struct iw_point *p) {
   struct prism2_hostapd_param *param;
   int ret = 0;
   u32 uwrq;
   printk(KERN_DEBUG "prism54_hostapd - len=%d\n", p->length);
   if (p->length < sizeof(struct prism2_hostapd_param) || p->length > PRISM2_HOSTAPD_MAX_BUF_SIZE || !p->pointer) return -EINVAL;
-  param = kmalloc(p->length, GFP_KERNEL);
-  if (param == NULL) return -ENOMEM;
-  if (copy_from_user(param, p->pointer, p->length)) {
-    kfree(param);
-    return -EFAULT;
-  }
+  param = memdup_user(p->pointer, p->length);
+  if (IS_ERR(param)) return PTR_ERR(param);
   switch (param->cmd) {
   case PRISM2_SET_ENCRYPTION:
     printk(KERN_DEBUG "%s: Caught WPA supplicant set encryption request\n", ndev->name);
     ret = prism2_ioctl_set_encryption(ndev, param, p->length);
     break;
   case PRISM2_HOSTAPD_SET_GENERIC_ELEMENT:
     printk(KERN_DEBUG "%s: Caught WPA supplicant set WPA IE request\n", ndev->name);
     ret = prism2_ioctl_set_generic_element(ndev, param, p->length);
     break;
   case PRISM2_HOSTAPD_MLME:
     printk(KERN_DEBUG "%s: Caught WPA supplicant MLME request\n", ndev->name);
     ret = prism2_ioctl_mlme(ndev, param);
     break;
   case PRISM2_HOSTAPD_SCAN_REQ:
     printk(KERN_DEBUG "%s: Caught WPA supplicant scan request\n", ndev->name);
     ret = prism2_ioctl_scan_req(ndev, param);
     break;
   case PRISM54_SET_WPA:
     printk(KERN_DEBUG "%s: Caught WPA supplicant wpa init request\n", ndev->name);
     uwrq = 1;
     ret = prism54_set_wpa(ndev, NULL, &uwrq, NULL);
     break;
   case PRISM54_DROP_UNENCRYPTED: printk(KERN_DEBUG "%s: Caught WPA drop unencrypted request\n", ndev->name);
 #if 0
 	       uwrq = 0x01;
 	       mgt_set(priv, DOT11_OID_EXUNENCRYPTED, &uwrq);
 	       down_write(&priv->mib_sem);
 	       mgt_commit(priv);
 	       up_write(&priv->mib_sem);
 #endif
     /* Not necessary, as set_wpa does it, should we just do it here though? */
     ret = 0;
     break;
   default:
     printk(KERN_DEBUG "%s: Caught a WPA supplicant request that is not supported\n", ndev->name);
     ret = -EOPNOTSUPP;
     break;
   }
   if (ret == 0 && copy_to_user(p->pointer, param, p->length)) ret = -EFAULT;
   kfree(param);
   return ret;
 }<sep>@@
expression e1,e2,e3,r;
statement S;
constant GFP_KERNEL;
@@
-	e1 = kmalloc(e3, GFP_KERNEL);
-	if (e1 == NULL)
-		S
-	if (copy_from_user(e1, e2, e3))
+	e1 = memdup_user(e2, e3);
+	if (IS_ERR(e1))
-       {
-		kfree(e1);
-		return r;
+		return PTR_ERR(e1);
-	}
[idx: 111]

--- initial
+++ final
@@ -1,70 +1,66 @@
 static ssize_t dev_config(struct file *fd, const char __user *buf, size_t len, loff_t *ptr) {
   struct dev_data *dev = fd->private_data;
   ssize_t value = len, length = len;
   unsigned total;
   u32 tag;
   char *kbuf;
   if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) return -EINVAL;
   /* we might need to change message format someday */
   if (copy_from_user(&tag, buf, 4)) return -EFAULT;
   if (tag != 0) return -EINVAL;
   buf += 4;
   length -= 4;
-  kbuf = kmalloc(length, GFP_KERNEL);
-  if (!kbuf) return -ENOMEM;
-  if (copy_from_user(kbuf, buf, length)) {
-    kfree(kbuf);
-    return -EFAULT;
-  }
+  kbuf = memdup_user(buf, length);
+  if (IS_ERR(kbuf)) return PTR_ERR(kbuf);
   spin_lock_irq(&dev->lock);
   value = -EINVAL;
   if (dev->buf) goto fail;
   dev->buf = kbuf;
   /* full or low speed config */
   dev->config = (void *)kbuf;
   total = le16_to_cpu(dev->config->wTotalLength);
   if (!is_valid_config(dev->config) || total >= length) goto fail;
   kbuf += total;
   length -= total;
   /* optional high speed config */
   if (kbuf[1] == USB_DT_CONFIG) {
     dev->hs_config = (void *)kbuf;
     total = le16_to_cpu(dev->hs_config->wTotalLength);
     if (!is_valid_config(dev->hs_config) || total >= length) goto fail;
     kbuf += total;
     length -= total;
   }
   /* could support multiple configs, using another encoding! */
   /* device descriptor (tweaked for paranoia) */
   if (length != USB_DT_DEVICE_SIZE) goto fail;
   dev->dev = (void *)kbuf;
   if (dev->dev->bLength != USB_DT_DEVICE_SIZE || dev->dev->bDescriptorType != USB_DT_DEVICE || dev->dev->bNumConfigurations != 1) goto fail;
   dev->dev->bNumConfigurations = 1;
   dev->dev->bcdUSB = cpu_to_le16(0x0200);
   /* triggers gadgetfs_bind(); then we can enumerate. */
   spin_unlock_irq(&dev->lock);
   value = usb_gadget_register_driver(&gadgetfs_driver);
   if (value != 0) {
     kfree(dev->buf);
     dev->buf = NULL;
   } else {
     /* at this point "good" hardware has for the first time
      * let the USB the host see us.  alternatively, if users
      * unplug/replug that will clear all the error state.
      *
      * note:  everything running before here was guaranteed
      * to choke driver model style diagnostics.  from here
      * on, they can work ... except in cleanup paths that
      * kick in after the ep0 descriptor is closed.
      */
     fd->f_op = &ep0_io_operations;
     value = len;
   }
   return value;
 fail:
   spin_unlock_irq(&dev->lock);
   pr_debug("%s: %s fail %Zd, %p\n", shortname, __func__, value, dev);
   kfree(dev->buf);
   dev->buf = NULL;
   return value;
 }<sep>@@
expression e1,e2,e3,r;
statement S;
constant GFP_KERNEL;
@@
-	e1 = kmalloc(e3, GFP_KERNEL);
-	if (e1 == NULL)
-		S
-	if (copy_from_user(e1, e2, e3))
+	e1 = memdup_user(e2, e3);
+	if (IS_ERR(e1))
-       {
-		kfree(e1);
-		return r;
+		return PTR_ERR(e1);
-	}
[idx: 112]

--- initial
+++ final
@@ -1,30 +1,29 @@
 static ssize_t ep_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr) {
   struct ep_data *data = fd->private_data;
   void *kbuf;
   ssize_t value;
   if ((value = get_ready_ep(fd->f_flags, data)) < 0) return value;
   /* halt any endpoint by doing a "wrong direction" i/o call */
   if (!usb_endpoint_dir_in(&data->desc)) {
     if (usb_endpoint_xfer_isoc(&data->desc)) return -EINVAL;
     DBG(data->dev, "%s halt\n", data->name);
     spin_lock_irq(&data->dev->lock);
     if (likely(data->ep != NULL)) usb_ep_set_halt(data->ep);
     spin_unlock_irq(&data->dev->lock);
     mutex_unlock(&data->lock);
     return -EBADMSG;
   }
   /* FIXME writebehind for O_NONBLOCK and poll(), qlen = 1 */
   value = -ENOMEM;
-  kbuf = kmalloc(len, GFP_KERNEL);
-  if (!kbuf) goto free1;
-  if (copy_from_user(kbuf, buf, len)) {
-    value = -EFAULT;
+  kbuf = memdup_user(buf, len);
+  if (IS_ERR(kbuf)) {
+    value = PTR_ERR(kbuf);
     goto free1;
   }
   value = ep_io(data, kbuf, len);
   VDEBUG(data->dev, "%s write %zu IN, status %d\n", data->name, len, (int)value);
+  kfree(kbuf);
 free1:
   mutex_unlock(&data->lock);
-  kfree(kbuf);
   return value;
 }<sep>@@
expression e1,e2,e3,r;
statement S;
constant GFP_KERNEL;
@@
-	e1 = kmalloc(e3, GFP_KERNEL);
-	if (e1 == NULL)
-		S
-	if (copy_from_user(e1, e2, e3))
+	e1 = memdup_user(e2, e3);
+	if (IS_ERR(e1))
        {
		...
-		kfree(e1);
		...
-		return r;
+		return PTR_ERR(e1);
	}
[idx: 113]

--- initial
+++ final
@@ -1,120 +1,108 @@
 static int vfe_config(struct msm_vfe_cfg_cmd *cmd, void *data) {
   struct msm_pmem_region *regptr;
   struct msm_vfe_command_8k vfecmd;
   uint32_t i;
   void *cmd_data = NULL;
   long rc = 0;
   struct vfe_cmd_axi_output_config *axio = NULL;
   struct vfe_cmd_stats_setting *scfg = NULL;
   if (cmd->cmd_type != CMD_FRAME_BUF_RELEASE && cmd->cmd_type != CMD_STATS_BUF_RELEASE) {
     if (copy_from_user(&vfecmd, (void __user *)(cmd->value), sizeof(struct msm_vfe_command_8k))) return -EFAULT;
   }
   CDBG("vfe_config: cmdType = %d\n", cmd->cmd_type);
   switch (cmd->cmd_type) {
   case CMD_GENERAL: rc = vfe_proc_general(&vfecmd); break;
   case CMD_STATS_ENABLE:
   case CMD_STATS_AXI_CFG: {
     struct axidata *axid;
     axid = data;
     if (!axid) return -EFAULT;
     scfg = kmalloc(sizeof(struct vfe_cmd_stats_setting), GFP_ATOMIC);
     if (!scfg) return -ENOMEM;
     if (copy_from_user(scfg, (void __user *)(vfecmd.value), vfecmd.length)) {
       kfree(scfg);
       return -EFAULT;
     }
     regptr = axid->region;
     if (axid->bufnum1 > 0) {
       for (i = 0; i < axid->bufnum1; i++) {
         scfg->awbBuffer[i] = (uint32_t)(regptr->paddr);
         regptr++;
       }
     }
     if (axid->bufnum2 > 0) {
       for (i = 0; i < axid->bufnum2; i++) {
         scfg->afBuffer[i] = (uint32_t)(regptr->paddr);
         regptr++;
       }
     }
     vfe_stats_config(scfg);
   } break;
   case CMD_STATS_AF_AXI_CFG: {
   } break;
   case CMD_FRAME_BUF_RELEASE: {
     /* preview buffer release */
     struct msm_frame *b;
     unsigned long p;
     struct vfe_cmd_output_ack fack;
     if (!data) return -EFAULT;
     b = (struct msm_frame *)(cmd->value);
     p = *(unsigned long *)data;
     b->path = MSM_FRAME_ENC;
     fack.ybufaddr[0] = (uint32_t)(p + b->y_off);
     fack.chromabufaddr[0] = (uint32_t)(p + b->cbcr_off);
     if (b->path == MSM_FRAME_PREV_1) vfe_output1_ack(&fack);
     if (b->path == MSM_FRAME_ENC || b->path == MSM_FRAME_PREV_2) vfe_output2_ack(&fack);
   } break;
   case CMD_SNAP_BUF_RELEASE: {
   } break;
   case CMD_STATS_BUF_RELEASE: {
     struct vfe_cmd_stats_wb_exp_ack sack;
     if (!data) return -EFAULT;
     sack.nextWbExpOutputBufferAddr = *(uint32_t *)data;
     vfe_stats_wb_exp_ack(&sack);
   } break;
   case CMD_AXI_CFG_OUT1: {
     struct axidata *axid;
     axid = data;
     if (!axid) return -EFAULT;
-    axio = kmalloc(sizeof(struct vfe_cmd_axi_output_config), GFP_ATOMIC);
-    if (!axio) return -ENOMEM;
-    if (copy_from_user(axio, (void __user *)(vfecmd.value), sizeof(struct vfe_cmd_axi_output_config))) {
-      kfree(axio);
-      return -EFAULT;
-    }
+    axio = memdup_user((void __user *)(vfecmd.value), sizeof(struct vfe_cmd_axi_output_config));
+    if (IS_ERR(axio)) return PTR_ERR(axio);
     vfe_config_axi(OUTPUT_1, axid, axio);
     vfe_axi_output_config(axio);
   } break;
   case CMD_AXI_CFG_OUT2:
   case CMD_RAW_PICT_AXI_CFG: {
     struct axidata *axid;
     axid = data;
     if (!axid) return -EFAULT;
-    axio = kmalloc(sizeof(struct vfe_cmd_axi_output_config), GFP_ATOMIC);
-    if (!axio) return -ENOMEM;
-    if (copy_from_user(axio, (void __user *)(vfecmd.value), sizeof(struct vfe_cmd_axi_output_config))) {
-      kfree(axio);
-      return -EFAULT;
-    }
+    axio = memdup_user((void __user *)(vfecmd.value), sizeof(struct vfe_cmd_axi_output_config));
+    if (IS_ERR(axio)) return PTR_ERR(axio);
     vfe_config_axi(OUTPUT_2, axid, axio);
     axio->outputDataSize = 0;
     vfe_axi_output_config(axio);
   } break;
   case CMD_AXI_CFG_SNAP_O1_AND_O2: {
     struct axidata *axid;
     axid = data;
     if (!axid) return -EFAULT;
-    axio = kmalloc(sizeof(struct vfe_cmd_axi_output_config), GFP_ATOMIC);
-    if (!axio) return -ENOMEM;
-    if (copy_from_user(axio, (void __user *)(vfecmd.value), sizeof(struct vfe_cmd_axi_output_config))) {
-      kfree(axio);
-      return -EFAULT;
-    }
+    axio = memdup_user((void __user *)(vfecmd.value), sizeof(struct vfe_cmd_axi_output_config));
+    if (IS_ERR(axio)) return PTR_ERR(axio);
     vfe_config_axi(OUTPUT_1_AND_2, axid, axio);
     vfe_axi_output_config(axio);
     cmd_data = axio;
   } break;
   default: break;
   } /* switch */
   kfree(scfg);
   kfree(axio);
   /*
           if (cmd->length > 256 &&
                           cmd_data &&
                           (cmd->cmd_type == CMD_GENERAL ||
                            cmd->cmd_type == CMD_STATS_DISABLE)) {
                   kfree(cmd_data);
           }
   */
   return rc;
 }<sep>@@
expression e1,e2,e3,r;
statement S;
constant GFP_KERNEL;
@@
-	e1 = kmalloc(e3, GFP_KERNEL);
-	if (e1 == NULL)
-		S
-	if (copy_from_user(e1, e2, e3))
+	e1 = memdup_user(e2, e3);
+	if (IS_ERR(e1))
-       {
-		kfree(e1);
-		return r;
+		return PTR_ERR(e1);
-	}
[idx: 114]

--- initial
+++ final
@@ -1,45 +1,41 @@
 static int con_font_set(struct vc_data *vc, struct console_font_op *op) {
   struct console_font font;
   int rc = -EINVAL;
   int size;
   if (vc->vc_mode != KD_TEXT) return -EINVAL;
   if (!op->data) return -EINVAL;
   if (op->charcount > 512) return -EINVAL;
   if (!op->height) { /* Need to guess font height [compat] */
     int h, i;
     u8 __user *charmap = op->data;
     u8 tmp;
 
     /* If from KDFONTOP ioctl, don't allow things which can be done in userland,
        so that we can get rid of this soon */
     if (!(op->flags & KD_FONT_FLAG_OLD)) return -EINVAL;
     for (h = 32; h > 0; h--)
       for (i = 0; i < op->charcount; i++) {
         if (get_user(tmp, &charmap[32 * i + h - 1])) return -EFAULT;
         if (tmp) goto nonzero;
       }
     return -EINVAL;
   nonzero:
     op->height = h;
   }
   if (op->width <= 0 || op->width > 32 || op->height > 32) return -EINVAL;
   size = (op->width + 7) / 8 * 32 * op->charcount;
   if (size > max_font_size) return -ENOSPC;
   font.charcount = op->charcount;
   font.height = op->height;
   font.width = op->width;
-  font.data = kmalloc(size, GFP_KERNEL);
-  if (!font.data) return -ENOMEM;
-  if (copy_from_user(font.data, op->data, size)) {
-    kfree(font.data);
-    return -EFAULT;
-  }
+  font.data = memdup_user(op->data, size);
+  if (IS_ERR(font.data)) return PTR_ERR(font.data);
   acquire_console_sem();
   if (vc->vc_sw->con_font_set)
     rc = vc->vc_sw->con_font_set(vc, &font, op->flags);
   else
     rc = -ENOSYS;
   release_console_sem();
   kfree(font.data);
   return rc;
 }<sep>@@
expression e1,e2,e3,r;
statement S;
constant GFP_KERNEL;
@@
-	e1 = kmalloc(e3, GFP_KERNEL);
-	if (e1 == NULL)
-		S
-	if (copy_from_user(e1, e2, e3))
+	e1 = memdup_user(e2, e3);
+	if (IS_ERR(e1))
-       {
-		kfree(e1);
-		return r;
+		return PTR_ERR(e1);
-	}
[idx: 115]

--- initial
+++ final
@@ -1,46 +1,42 @@
 static int i2o_cfg_parms(unsigned long arg, unsigned int type) {
   int ret = 0;
   struct i2o_controller *c;
   struct i2o_device *dev;
   struct i2o_cmd_psetget __user *cmd = (struct i2o_cmd_psetget __user *)arg;
   struct i2o_cmd_psetget kcmd;
   u32 reslen;
   u8 *ops;
   u8 *res;
   int len = 0;
   u32 i2o_cmd = (type == I2OPARMGET ? I2O_CMD_UTIL_PARAMS_GET : I2O_CMD_UTIL_PARAMS_SET);
   if (copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_psetget))) return -EFAULT;
   if (get_user(reslen, kcmd.reslen)) return -EFAULT;
   c = i2o_find_iop(kcmd.iop);
   if (!c) return -ENXIO;
   dev = i2o_iop_find_device(c, kcmd.tid);
   if (!dev) return -ENXIO;
-  ops = kmalloc(kcmd.oplen, GFP_KERNEL);
-  if (!ops) return -ENOMEM;
-  if (copy_from_user(ops, kcmd.opbuf, kcmd.oplen)) {
-    kfree(ops);
-    return -EFAULT;
-  }
+  ops = memdup_user(kcmd.opbuf, kcmd.oplen);
+  if (IS_ERR(ops)) return PTR_ERR(ops);
   /*
    * It's possible to have a _very_ large table
    * and that the user asks for all of it at once...
    */
   res = kmalloc(65536, GFP_KERNEL);
   if (!res) {
     kfree(ops);
     return -ENOMEM;
   }
   len = i2o_parm_issue(dev, i2o_cmd, ops, kcmd.oplen, res, 65536);
   kfree(ops);
   if (len < 0) {
     kfree(res);
     return -EAGAIN;
   }
   put_user(len, kcmd.reslen);
   if (len > reslen)
     ret = -ENOBUFS;
   else if (copy_to_user(kcmd.resbuf, res, len))
     ret = -EFAULT;
   kfree(res);
   return ret;
 }<sep>@@
expression e1,e2,e3,r;
statement S;
constant GFP_KERNEL;
@@
-	e1 = kmalloc(e3, GFP_KERNEL);
-	if (e1 == NULL)
-		S
-	if (copy_from_user(e1, e2, e3))
+	e1 = memdup_user(e2, e3);
+	if (IS_ERR(e1))
-       {
-		kfree(e1);
-		return r;
+		return PTR_ERR(e1);
-	}
[idx: 116]

--- initial
+++ final
@@ -1,12 +1,8 @@
 static struct autofs_dev_ioctl *copy_dev_ioctl(struct autofs_dev_ioctl __user *in) {
   struct autofs_dev_ioctl tmp, *ads;
   if (copy_from_user(&tmp, in, sizeof(tmp))) return ERR_PTR(-EFAULT);
   if (tmp.size < sizeof(tmp)) return ERR_PTR(-EINVAL);
-  ads = kmalloc(tmp.size, GFP_KERNEL);
-  if (!ads) return ERR_PTR(-ENOMEM);
-  if (copy_from_user(ads, in, tmp.size)) {
-    kfree(ads);
-    return ERR_PTR(-EFAULT);
-  }
+  ads = memdup_user(in, tmp.size);
+  if (IS_ERR(ads)) return ads;
   return ads;
 }<sep>@@
expression e1,e2,e3,r;
statement S;
constant GFP_KERNEL;
@@
-	e1 = kmalloc(e3, GFP_KERNEL);
-	if (e1 == NULL)
-		S
-	if (copy_from_user(e1, e2, e3))
+	e1 = memdup_user(e2, e3);
+	if (IS_ERR(e1))
-       {
-		kfree(e1);
-		return r;
+		return PTR_ERR(e1);
-	}
[idx: 117]

--- initial
+++ final
@@ -1,42 +1,37 @@
 static int ixj_build_filter_cadence(IXJ *j, IXJ_FILTER_CADENCE __user *cp) {
   IXJ_FILTER_CADENCE *lcp;
-  lcp = kmalloc(sizeof(IXJ_FILTER_CADENCE), GFP_KERNEL);
-  if (lcp == NULL) {
-    if (ixjdebug & 0x0001) { printk(KERN_INFO "Could not allocate memory for cadence\n"); }
-    return -ENOMEM;
-  }
-  if (copy_from_user(lcp, cp, sizeof(IXJ_FILTER_CADENCE))) {
+  lcp = memdup_user(cp, sizeof(IXJ_FILTER_CADENCE));
+  if (IS_ERR(lcp)) {
     if (ixjdebug & 0x0001) { printk(KERN_INFO "Could not copy cadence to kernel\n"); }
-    kfree(lcp);
-    return -EFAULT;
+    return PTR_ERR(lcp);
   }
   if (lcp->filter > 5) {
     if (ixjdebug & 0x0001) { printk(KERN_INFO "Cadence out of range\n"); }
     kfree(lcp);
     return -1;
   }
   j->cadence_f[lcp->filter].state = 0;
   j->cadence_f[lcp->filter].enable = lcp->enable;
   j->filter_en[lcp->filter] = j->cadence_f[lcp->filter].en_filter = lcp->en_filter;
   j->cadence_f[lcp->filter].on1 = lcp->on1;
   j->cadence_f[lcp->filter].on1min = 0;
   j->cadence_f[lcp->filter].on1max = 0;
   j->cadence_f[lcp->filter].off1 = lcp->off1;
   j->cadence_f[lcp->filter].off1min = 0;
   j->cadence_f[lcp->filter].off1max = 0;
   j->cadence_f[lcp->filter].on2 = lcp->on2;
   j->cadence_f[lcp->filter].on2min = 0;
   j->cadence_f[lcp->filter].on2max = 0;
   j->cadence_f[lcp->filter].off2 = lcp->off2;
   j->cadence_f[lcp->filter].off2min = 0;
   j->cadence_f[lcp->filter].off2max = 0;
   j->cadence_f[lcp->filter].on3 = lcp->on3;
   j->cadence_f[lcp->filter].on3min = 0;
   j->cadence_f[lcp->filter].on3max = 0;
   j->cadence_f[lcp->filter].off3 = lcp->off3;
   j->cadence_f[lcp->filter].off3min = 0;
   j->cadence_f[lcp->filter].off3max = 0;
   if (ixjdebug & 0x0002) { printk(KERN_INFO "Cadence %d loaded\n", lcp->filter); }
   kfree(lcp);
   return 0;
 }<sep>@@
expression e1,e2,e3,r;
statement S;
constant GFP_KERNEL;
@@
-	e1 = kmalloc(e3, GFP_KERNEL);
-	if (e1 == NULL)
-		S
-	if (copy_from_user(e1, e2, e3))
+	e1 = memdup_user(e2, e3);
+	if (IS_ERR(e1))
        {
		...
-		kfree(e1);
		...
-		return r;
+		return PTR_ERR(e1);
	}
[idx: 119]

--- initial
+++ final
@@ -1,117 +1,113 @@
 static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
{ /* @ ADG or ATO, depending on the case */
   int i;
   unsigned char IndexCard;
   void __iomem *pmem;
   int ret = 0;
   volatile unsigned char byte_reset_it;
   struct st_ram_io *adgl;
   void __user *argp = (void __user *)arg;
   /* In general, the device is only openable by root anyway, so we're not
      particularly concerned that bogus ioctls can flood the console. */
-  adgl = kmalloc(sizeof(struct st_ram_io), GFP_KERNEL);
-  if (!adgl) return -ENOMEM;
-  if (copy_from_user(adgl, argp, sizeof(struct st_ram_io))) {
-    kfree(adgl);
-    return -EFAULT;
-  }
+  adgl = memdup_user(argp, sizeof(struct st_ram_io));
+  if (IS_ERR(adgl)) return PTR_ERR(adgl);
   lock_kernel();
   IndexCard = adgl->num_card - 1;
 
   if (cmd != 6 && ((IndexCard >= MAX_BOARD) || !apbs[IndexCard].RamIO)) {
     static int warncount = 10;
     if (warncount) {
       printk(KERN_WARNING "APPLICOM driver IOCTL, bad board number %d\n", (int)IndexCard + 1);
       warncount--;
     }
     kfree(adgl);
     unlock_kernel();
     return -EINVAL;
   }
   switch (cmd) {
 
   case 0:
     pmem = apbs[IndexCard].RamIO;
     for (i = 0; i < sizeof(struct st_ram_io); i++)
       ((unsigned char *)adgl)[i] = readb(pmem++);
     if (copy_to_user(argp, adgl, sizeof(struct st_ram_io))) ret = -EFAULT;
     break;
   case 1:
     pmem = apbs[IndexCard].RamIO + CONF_END_TEST;
     for (i = 0; i < 4; i++)
       adgl->conf_end_test[i] = readb(pmem++);
     for (i = 0; i < 2; i++)
       adgl->error_code[i] = readb(pmem++);
     for (i = 0; i < 4; i++)
       adgl->parameter_error[i] = readb(pmem++);
     pmem = apbs[IndexCard].RamIO + VERS;
     adgl->vers = readb(pmem);
     pmem = apbs[IndexCard].RamIO + TYPE_CARD;
     for (i = 0; i < 20; i++)
       adgl->reserv1[i] = readb(pmem++);
     *(int *)&adgl->reserv1[20] = (readb(apbs[IndexCard].RamIO + SERIAL_NUMBER) << 16) + (readb(apbs[IndexCard].RamIO + SERIAL_NUMBER + 1) << 8) + (readb(apbs[IndexCard].RamIO + SERIAL_NUMBER + 2));
     if (copy_to_user(argp, adgl, sizeof(struct st_ram_io))) ret = -EFAULT;
     break;
   case 2:
     pmem = apbs[IndexCard].RamIO + CONF_END_TEST;
     for (i = 0; i < 10; i++)
       writeb(0xff, pmem++);
     writeb(adgl->data_from_pc_ready, apbs[IndexCard].RamIO + DATA_FROM_PC_READY);
     writeb(1, apbs[IndexCard].RamIO + RAM_IT_FROM_PC);
 
     for (i = 0; i < MAX_BOARD; i++) {
       if (apbs[i].RamIO) { byte_reset_it = readb(apbs[i].RamIO + RAM_IT_TO_PC); }
     }
     break;
   case 3:
     pmem = apbs[IndexCard].RamIO + TIC_DES_FROM_PC;
     writeb(adgl->tic_des_from_pc, pmem);
     break;
   case 4:
     pmem = apbs[IndexCard].RamIO + TIC_OWNER_TO_PC;
     adgl->tic_owner_to_pc = readb(pmem++);
     adgl->numcard_owner_to_pc = readb(pmem);
     if (copy_to_user(argp, adgl, sizeof(struct st_ram_io))) ret = -EFAULT;
     break;
   case 5:
     writeb(adgl->num_card, apbs[IndexCard].RamIO + NUMCARD_OWNER_TO_PC);
     writeb(adgl->num_card, apbs[IndexCard].RamIO + NUMCARD_DES_FROM_PC);
     writeb(adgl->num_card, apbs[IndexCard].RamIO + NUMCARD_ACK_FROM_PC);
     writeb(4, apbs[IndexCard].RamIO + DATA_FROM_PC_READY);
     writeb(1, apbs[IndexCard].RamIO + RAM_IT_FROM_PC);
     break;
   case 6:
     printk(KERN_INFO "APPLICOM driver release .... V2.8.0 ($Revision: 1.30 $)\n");
     printk(KERN_INFO "Number of installed boards . %d\n", (int)numboards);
     printk(KERN_INFO "Segment of board ........... %X\n", (int)mem);
     printk(KERN_INFO "Interrupt IRQ number ....... %d\n", (int)irq);
     for (i = 0; i < MAX_BOARD; i++) {
       int serial;
       char boardname[(SERIAL_NUMBER - TYPE_CARD) + 1];
       if (!apbs[i].RamIO) continue;
       for (serial = 0; serial < SERIAL_NUMBER - TYPE_CARD; serial++)
         boardname[serial] = readb(apbs[i].RamIO + TYPE_CARD + serial);
       boardname[serial] = 0;
       printk(KERN_INFO "Prom version board %d ....... V%d.%d %s", i + 1, (int)(readb(apbs[IndexCard].RamIO + VERS) >> 4), (int)(readb(apbs[IndexCard].RamIO + VERS) & 0xF), boardname);
       serial = (readb(apbs[i].RamIO + SERIAL_NUMBER) << 16) + (readb(apbs[i].RamIO + SERIAL_NUMBER + 1) << 8) + (readb(apbs[i].RamIO + SERIAL_NUMBER + 2));
       if (serial != 0)
         printk(" S/N %d\n", serial);
       else
         printk("\n");
     }
     if (DeviceErrorCount != 0) printk(KERN_INFO "DeviceErrorCount ........... %d\n", DeviceErrorCount);
     if (ReadErrorCount != 0) printk(KERN_INFO "ReadErrorCount ............. %d\n", ReadErrorCount);
     if (WriteErrorCount != 0) printk(KERN_INFO "WriteErrorCount ............ %d\n", WriteErrorCount);
     if (waitqueue_active(&FlagSleepRec)) printk(KERN_INFO "Process in read pending\n");
     for (i = 0; i < MAX_BOARD; i++) {
       if (apbs[i].RamIO && waitqueue_active(&apbs[i].FlagSleepSend)) printk(KERN_INFO "Process in write pending board %d\n", i + 1);
     }
     break;
   default: ret = -ENOTTY; break;
   }
   Dummy = readb(apbs[IndexCard].RamIO + VERS);
   kfree(adgl);
   unlock_kernel();
   return 0;
 }<sep>@@
expression e1,e2,e3,r;
statement S;
constant GFP_KERNEL;
@@
-	e1 = kmalloc(e3, GFP_KERNEL);
-	if (e1 == NULL)
-		S
-	if (copy_from_user(e1, e2, e3))
+	e1 = memdup_user(e2, e3);
+	if (IS_ERR(e1))
-       {
-		kfree(e1);
-		return r;
+		return PTR_ERR(e1);
-	}
[idx: 120]

--- initial
+++ final
@@ -1,107 +1,95 @@
 static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io) {
   int ctlr = h->ctlr;
   cmdlist_t *c;
   void *p = NULL;
   unsigned long flags;
   int error;
   if ((c = cmd_alloc(h, 0)) == NULL) return -ENOMEM;
   c->ctlr = ctlr;
   c->hdr.unit = (io->unit & UNITVALID) ? (io->unit & ~UNITVALID) : dsk;
   c->hdr.size = sizeof(rblk_t) >> 2;
   c->size += sizeof(rblk_t);
   c->req.hdr.cmd = io->cmd;
   c->req.hdr.blk = io->blk;
   c->req.hdr.blk_cnt = io->blk_cnt;
   c->type = CMD_IOCTL_PEND;
   /* Pre submit processing */
   switch (io->cmd) {
   case PASSTHRU_A:
-    p = kmalloc(io->sg[0].size, GFP_KERNEL);
-    if (!p) {
-      error = -ENOMEM;
+    p = memdup_user(io->sg[0].addr, io->sg[0].size);
+    if (IS_ERR(p)) {
       cmd_free(h, c, 0);
-      return (error);
-    }
-    if (copy_from_user(p, io->sg[0].addr, io->sg[0].size)) {
-      kfree(p);
-      cmd_free(h, c, 0);
-      return -EFAULT;
+      return PTR_ERR(p);
     }
     c->req.hdr.blk = pci_map_single(h->pci_dev, &(io->c), sizeof(ida_ioctl_t), PCI_DMA_BIDIRECTIONAL);
     c->req.sg[0].size = io->sg[0].size;
     c->req.sg[0].addr = pci_map_single(h->pci_dev, p, c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
     c->req.hdr.sg_cnt = 1;
     break;
   case IDA_READ:
   case READ_FLASH_ROM:
   case SENSE_CONTROLLER_PERFORMANCE:
     p = kmalloc(io->sg[0].size, GFP_KERNEL);
     if (!p) {
       error = -ENOMEM;
       cmd_free(h, c, 0);
       return (error);
     }
     c->req.sg[0].size = io->sg[0].size;
     c->req.sg[0].addr = pci_map_single(h->pci_dev, p, c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
     c->req.hdr.sg_cnt = 1;
     break;
   case IDA_WRITE:
   case IDA_WRITE_MEDIA:
   case DIAG_PASS_THRU:
   case COLLECT_BUFFER:
   case WRITE_FLASH_ROM:
-    p = kmalloc(io->sg[0].size, GFP_KERNEL);
-    if (!p) {
-      error = -ENOMEM;
+    p = memdup_user(io->sg[0].addr, io->sg[0].size);
+    if (IS_ERR(p)) {
       cmd_free(h, c, 0);
-      return (error);
-    }
-    if (copy_from_user(p, io->sg[0].addr, io->sg[0].size)) {
-      kfree(p);
-      cmd_free(h, c, 0);
-      return -EFAULT;
+      return PTR_ERR(p);
     }
     c->req.sg[0].size = io->sg[0].size;
     c->req.sg[0].addr = pci_map_single(h->pci_dev, p, c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
     c->req.hdr.sg_cnt = 1;
     break;
   default:
     c->req.sg[0].size = sizeof(io->c);
     c->req.sg[0].addr = pci_map_single(h->pci_dev, &io->c, c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
     c->req.hdr.sg_cnt = 1;
   }
 
   /* Put the request on the tail of the request queue */
   spin_lock_irqsave(IDA_LOCK(ctlr), flags);
   addQ(&h->reqQ, c);
   h->Qdepth++;
   start_io(h);
   spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
   /* Wait for completion */
   while (c->type != CMD_IOCTL_DONE)
     schedule();
   /* Unmap the DMA  */
   pci_unmap_single(h->pci_dev, c->req.sg[0].addr, c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
   /* Post submit processing */
   switch (io->cmd) {
   case PASSTHRU_A: pci_unmap_single(h->pci_dev, c->req.hdr.blk, sizeof(ida_ioctl_t), PCI_DMA_BIDIRECTIONAL);
   case IDA_READ:
   case DIAG_PASS_THRU:
   case SENSE_CONTROLLER_PERFORMANCE:
   case READ_FLASH_ROM:
     if (copy_to_user(io->sg[0].addr, p, io->sg[0].size)) {
       kfree(p);
       return -EFAULT;
     }
     /* fall through and free p */
   case IDA_WRITE:
   case IDA_WRITE_MEDIA:
   case COLLECT_BUFFER:
   case WRITE_FLASH_ROM: kfree(p); break;
   default:;
     /* Nothing to do */
   }
   io->rcode = c->req.hdr.rcode;
   cmd_free(h, c, 0);
   return (0);
 }<sep>@@
expression e1,e2,e3,r;
statement S;
constant GFP_KERNEL;
@@
-	e1 = kmalloc(e3, GFP_KERNEL);
-	if (e1 == NULL)
-		S
-	if (copy_from_user(e1, e2, e3))
+	e1 = memdup_user(e2, e3);
+	if (IS_ERR(e1))
        {
		...
-		kfree(e1);
		...
-		return r;
+		return PTR_ERR(e1);
	}
[idx: 121]

--- initial
+++ final
@@ -1,43 +1,39 @@
 int qeth_snmp_command(struct qeth_card *card, char __user *udata) {
   struct qeth_cmd_buffer *iob;
   struct qeth_ipa_cmd *cmd;
   struct qeth_snmp_ureq *ureq;
   int req_len;
   struct qeth_arp_query_info qinfo = {
       0,
   };
   int rc = 0;
   QETH_CARD_TEXT(card, 3, "snmpcmd");
   if (card->info.guestlan) return -EOPNOTSUPP;
   if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) && (!card->options.layer2)) { return -EOPNOTSUPP; }
   /* skip 4 bytes (data_len struct member) to get req_len */
   if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int))) return -EFAULT;
-  ureq = kmalloc(req_len + sizeof(struct qeth_snmp_ureq_hdr), GFP_KERNEL);
-  if (!ureq) {
+  ureq = memdup_user(udata, req_len + sizeof(struct qeth_snmp_ureq_hdr));
+  if (IS_ERR(ureq)) {
     QETH_CARD_TEXT(card, 2, "snmpnome");
-    return -ENOMEM;
-  }
-  if (copy_from_user(ureq, udata, req_len + sizeof(struct qeth_snmp_ureq_hdr))) {
-    kfree(ureq);
-    return -EFAULT;
+    return PTR_ERR(ureq);
   }
   qinfo.udata_len = ureq->hdr.data_len;
   qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
   if (!qinfo.udata) {
     kfree(ureq);
     return -ENOMEM;
   }
   qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);
   iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, QETH_SNMP_SETADP_CMDLENGTH + req_len);
   cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
   memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len);
   rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len, qeth_snmp_command_cb, (void *)&qinfo);
   if (rc)
     QETH_DBF_MESSAGE(2, "SNMP command failed on %s: (0x%x)\n", QETH_CARD_IFNAME(card), rc);
   else {
     if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) rc = -EFAULT;
   }
   kfree(ureq);
   kfree(qinfo.udata);
   return rc;
 }<sep>@@
expression e1,e2,e3,r;
constant GFP_KERNEL;
@@
+	e1 = memdup_user(e2, e3);
+	if (IS_ERR(e1)) {
-	e1 = kmalloc(e3, GFP_KERNEL);
-	if (e1 == NULL) {
		...
-		return ...;
-	}
-	if (copy_from_user(e1, e2, e3))
-       {
-		kfree(e1);
-		return r;
+		return PTR_ERR(e1);
	}
[idx: 122]

--- initial
+++ final
@@ -1,15 +1,11 @@
 char *strndup_user(const char __user *s, long n) {
   char *p;
   long length;
   length = strnlen_user(s, n);
   if (!length) return ERR_PTR(-EFAULT);
   if (length > n) return ERR_PTR(-EINVAL);
-  p = kmalloc(length, GFP_KERNEL);
-  if (!p) return ERR_PTR(-ENOMEM);
-  if (copy_from_user(p, s, length)) {
-    kfree(p);
-    return ERR_PTR(-EFAULT);
-  }
+  p = memdup_user(s, length);
+  if (IS_ERR(p)) return p;
   p[length - 1] = '\0';
   return p;
 }<sep>@@
expression e1,e2,e3,r;
statement S;
constant GFP_KERNEL;
@@
-	e1 = kmalloc(e3, GFP_KERNEL);
-	if (e1 == NULL)
-		S
-	if (copy_from_user(e1, e2, e3))
+	e1 = memdup_user(e2, e3);
+	if (IS_ERR(e1))
-       {
-		kfree(e1);
-		return r;
+		return PTR_ERR(e1);
-	}
[idx: 123]

--- initial
+++ final
@@ -1,72 +1,68 @@
 static int sg_start_req(Sg_request *srp, unsigned char *cmd) {
   int res;
   struct request *rq;
   Sg_fd *sfp = srp->parentfp;
   sg_io_hdr_t *hp = &srp->header;
   int dxfer_len = (int)hp->dxfer_len;
   int dxfer_dir = hp->dxfer_direction;
   unsigned int iov_count = hp->iovec_count;
   Sg_scatter_hold *req_schp = &srp->data;
   Sg_scatter_hold *rsv_schp = &sfp->reserve;
   struct request_queue *q = sfp->parentdp->device->request_queue;
   struct rq_map_data *md, map_data;
   int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? WRITE : READ;
   SCSI_LOG_TIMEOUT(4, printk(KERN_INFO "sg_start_req: dxfer_len=%d\n", dxfer_len));
   rq = blk_get_request(q, rw, GFP_ATOMIC);
   if (!rq) return -ENOMEM;
   memcpy(rq->cmd, cmd, hp->cmd_len);
   rq->cmd_len = hp->cmd_len;
   rq->cmd_type = REQ_TYPE_BLOCK_PC;
   srp->rq = rq;
   rq->end_io_data = srp;
   rq->sense = srp->sense_b;
   rq->retries = SG_DEFAULT_RETRIES;
   if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE)) return 0;
   if (sg_allow_dio && hp->flags & SG_FLAG_DIRECT_IO && dxfer_dir != SG_DXFER_UNKNOWN && !iov_count && !sfp->parentdp->device->host->unchecked_isa_dma && blk_rq_aligned(q, hp->dxferp, dxfer_len))
     md = NULL;
   else
     md = &map_data;
   if (md) {
     if (!sg_res_in_use(sfp) && dxfer_len <= rsv_schp->bufflen)
       sg_link_reserve(sfp, srp, dxfer_len);
     else {
       res = sg_build_indirect(req_schp, sfp, dxfer_len);
       if (res) return res;
     }
     md->pages = req_schp->pages;
     md->page_order = req_schp->page_order;
     md->nr_entries = req_schp->k_use_sg;
     md->offset = 0;
     md->null_mapped = hp->dxferp ? 0 : 1;
     if (dxfer_dir == SG_DXFER_TO_FROM_DEV)
       md->from_user = 1;
     else
       md->from_user = 0;
   }
   if (iov_count) {
     int len, size = sizeof(struct sg_iovec) * iov_count;
     struct iovec *iov;
-    iov = kmalloc(size, GFP_ATOMIC);
-    if (!iov) return -ENOMEM;
-    if (copy_from_user(iov, hp->dxferp, size)) {
-      kfree(iov);
-      return -EFAULT;
-    }
+    iov = memdup_user(hp->dxferp, size);
+    if (IS_ERR(iov)) return PTR_ERR(iov);
     len = iov_length(iov, iov_count);
     if (hp->dxfer_len < len) {
       iov_count = iov_shorten(iov, iov_count, hp->dxfer_len);
       len = hp->dxfer_len;
     }
     res = blk_rq_map_user_iov(q, rq, md, (struct sg_iovec *)iov, iov_count, len, GFP_ATOMIC);
     kfree(iov);
   } else
     res = blk_rq_map_user(q, rq, md, hp->dxferp, hp->dxfer_len, GFP_ATOMIC);
   if (!res) {
     srp->bio = rq->bio;
     if (!md) {
       req_schp->dio_in_use = 1;
       hp->info |= SG_INFO_DIRECT_IO;
     }
   }
   return res;
 }<sep>@@
expression e1,e2,e3,r;
statement S;
constant GFP_KERNEL;
@@
-	e1 = kmalloc(e3, GFP_KERNEL);
-	if (e1 == NULL)
-		S
-	if (copy_from_user(e1, e2, e3))
+	e1 = memdup_user(e2, e3);
+	if (IS_ERR(e1))
-       {
-		kfree(e1);
-		return r;
+		return PTR_ERR(e1);
-	}
[idx: 124]

--- initial
+++ final
@@ -1,59 +1,55 @@
 static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client, unsigned long arg) {
   struct i2c_rdwr_ioctl_data rdwr_arg;
   struct i2c_msg *rdwr_pa;
   u8 __user **data_ptrs;
   int i, res;
   if (copy_from_user(&rdwr_arg, (struct i2c_rdwr_ioctl_data __user *)arg, sizeof(rdwr_arg))) return -EFAULT;
   /* Put an arbitrary limit on the number of messages that can
    * be sent at once */
   if (rdwr_arg.nmsgs > I2C_RDRW_IOCTL_MAX_MSGS) return -EINVAL;
-  rdwr_pa = kmalloc(rdwr_arg.nmsgs * sizeof(struct i2c_msg), GFP_KERNEL);
-  if (!rdwr_pa) return -ENOMEM;
-  if (copy_from_user(rdwr_pa, rdwr_arg.msgs, rdwr_arg.nmsgs * sizeof(struct i2c_msg))) {
-    kfree(rdwr_pa);
-    return -EFAULT;
-  }
+  rdwr_pa = memdup_user(rdwr_arg.msgs, rdwr_arg.nmsgs * sizeof(struct i2c_msg));
+  if (IS_ERR(rdwr_pa)) return PTR_ERR(rdwr_pa);
   data_ptrs = kmalloc(rdwr_arg.nmsgs * sizeof(u8 __user *), GFP_KERNEL);
   if (data_ptrs == NULL) {
     kfree(rdwr_pa);
     return -ENOMEM;
   }
   res = 0;
   for (i = 0; i < rdwr_arg.nmsgs; i++) {
     /* Limit the size of the message to a sane amount;
      * and don't let length change either. */
     if ((rdwr_pa[i].len > 8192) || (rdwr_pa[i].flags & I2C_M_RECV_LEN)) {
       res = -EINVAL;
       break;
     }
     data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
     rdwr_pa[i].buf = kmalloc(rdwr_pa[i].len, GFP_KERNEL);
     if (rdwr_pa[i].buf == NULL) {
       res = -ENOMEM;
       break;
     }
     if (copy_from_user(rdwr_pa[i].buf, data_ptrs[i], rdwr_pa[i].len)) {
       ++i; /* Needs to be kfreed too */
       res = -EFAULT;
       break;
     }
   }
   if (res < 0) {
     int j;
     for (j = 0; j < i; ++j)
       kfree(rdwr_pa[j].buf);
     kfree(data_ptrs);
     kfree(rdwr_pa);
     return res;
   }
   res = i2c_transfer(client->adapter, rdwr_pa, rdwr_arg.nmsgs);
   while (i-- > 0) {
     if (res >= 0 && (rdwr_pa[i].flags & I2C_M_RD)) {
       if (copy_to_user(data_ptrs[i], rdwr_pa[i].buf, rdwr_pa[i].len)) res = -EFAULT;
     }
     kfree(rdwr_pa[i].buf);
   }
   kfree(data_ptrs);
   kfree(rdwr_pa);
   return res;
 }<sep>@@
expression e1,e2,e3,r;
statement S;
constant GFP_KERNEL;
@@
-	e1 = kmalloc(e3, GFP_KERNEL);
-	if (e1 == NULL)
-		S
-	if (copy_from_user(e1, e2, e3))
+	e1 = memdup_user(e2, e3);
+	if (IS_ERR(e1))
-       {
-		kfree(e1);
-		return r;
+		return PTR_ERR(e1);
-	}
[idx: 125]

--- initial
+++ final
@@ -1,16 +1,12 @@
 static ssize_t i2cdev_write(struct file *file, const char __user *buf, size_t count, loff_t *offset) {
   int ret;
   char *tmp;
   struct i2c_client *client = file->private_data;
   if (count > 8192) count = 8192;
-  tmp = kmalloc(count, GFP_KERNEL);
-  if (tmp == NULL) return -ENOMEM;
-  if (copy_from_user(tmp, buf, count)) {
-    kfree(tmp);
-    return -EFAULT;
-  }
+  tmp = memdup_user(buf, count);
+  if (IS_ERR(tmp)) return PTR_ERR(tmp);
   pr_debug("i2c-dev: i2c-%d writing %zu bytes.\n", iminor(file->f_path.dentry->d_inode), count);
   ret = i2c_master_send(client, tmp, count);
   kfree(tmp);
   return ret;
 }<sep>@@
expression e1,e2,e3,r;
statement S;
constant GFP_KERNEL;
@@
-	e1 = kmalloc(e3, GFP_KERNEL);
-	if (e1 == NULL)
-		S
-	if (copy_from_user(e1, e2, e3))
+	e1 = memdup_user(e2, e3);
+	if (IS_ERR(e1))
-       {
-		kfree(e1);
-		return r;
+		return PTR_ERR(e1);
-	}
[idx: 126]

--- initial
+++ final
@@ -1,18 +1,14 @@
 static noinline int btrfs_ioctl_ino_lookup(struct file *file, void __user *argp) {
   struct btrfs_ioctl_ino_lookup_args *args;
   struct inode *inode;
   int ret;
   if (!capable(CAP_SYS_ADMIN)) return -EPERM;
-  args = kmalloc(sizeof(*args), GFP_KERNEL);
-  if (!args) return -ENOMEM;
-  if (copy_from_user(args, argp, sizeof(*args))) {
-    kfree(args);
-    return -EFAULT;
-  }
+  args = memdup_user(argp, sizeof(*args));
+  if (IS_ERR(args)) return PTR_ERR(args);
   inode = fdentry(file)->d_inode;
   if (args->treeid == 0) args->treeid = BTRFS_I(inode)->root->root_key.objectid;
   ret = btrfs_search_path_in_tree(BTRFS_I(inode)->root->fs_info, args->treeid, args->objectid, args->name);
   if (ret == 0 && copy_to_user(argp, args, sizeof(*args))) ret = -EFAULT;
   kfree(args);
   return ret;
 }<sep>@@
expression e1,e2,e3,r;
statement S;
constant GFP_KERNEL;
@@
-	e1 = kmalloc(e3, GFP_KERNEL);
-	if (e1 == NULL)
-		S
-	if (copy_from_user(e1, e2, e3))
+	e1 = memdup_user(e2, e3);
+	if (IS_ERR(e1))
-       {
-		kfree(e1);
-		return r;
+		return PTR_ERR(e1);
-	}
<|end_of_text|> 
 | 127 
							 | 
					
	--- initial
+++ final
@@ -1,17 +1,13 @@
 static noinline int btrfs_ioctl_tree_search(struct file *file, void __user *argp) {
   struct btrfs_ioctl_search_args *args;
   struct inode *inode;
   int ret;
   if (!capable(CAP_SYS_ADMIN)) return -EPERM;
-  args = kmalloc(sizeof(*args), GFP_KERNEL);
-  if (!args) return -ENOMEM;
-  if (copy_from_user(args, argp, sizeof(*args))) {
-    kfree(args);
-    return -EFAULT;
-  }
+  args = memdup_user(argp, sizeof(*args));
+  if (IS_ERR(args)) return PTR_ERR(args);
   inode = fdentry(file)->d_inode;
   ret = search_ioctl(inode, args);
   if (ret == 0 && copy_to_user(argp, args, sizeof(*args))) ret = -EFAULT;
   kfree(args);
   return ret;
 }<sep>@@
expression e1,e2,e3,r;
statement S;
constant GFP_KERNEL;
@@
-	e1 = kmalloc(e3, GFP_KERNEL);
-	if (e1 == NULL)
-		S
-	if (copy_from_user(e1, e2, e3))
+	e1 = memdup_user(e2, e3);
+	if (IS_ERR(e1))
-       {
-		kfree(e1);
-		return r;
+		return PTR_ERR(e1);
-	}
<|end_of_text|> 
 | 128 
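Rows 125-128 depend on the kernel's ERR_PTR convention, which encodes a negative errno inside an invalid pointer value. A condensed sketch of the machinery (simplified from include/linux/err.h, which adds __force casts and unlikely() hints):

  #define MAX_ERRNO 4095

  static inline void *ERR_PTR(long error)
  {
    return (void *)error;
  }

  static inline long PTR_ERR(const void *ptr)
  {
    return (long)ptr;
  }

  static inline bool IS_ERR(const void *ptr)
  {
    /* errno values occupy the last page of the address space */
    return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
  }

This is why the converted callers must not compare the result against NULL: a failed memdup_user() comes back as ERR_PTR(-ENOMEM) or ERR_PTR(-EFAULT), both non-NULL.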
							 | 
					
	--- initial
+++ final
@@ -1,53 +1,54 @@
 static int gsta_probe(struct platform_device *dev) {
   int i, err;
   struct pci_dev *pdev;
   struct sta2x11_gpio_pdata *gpio_pdata;
   struct gsta_gpio *chip;
   struct resource *res;
   pdev = *(struct pci_dev **)(dev->dev.platform_data);
   gpio_pdata = dev_get_platdata(&pdev->dev);
   if (gpio_pdata == NULL) dev_err(&dev->dev, "no gpio config\n");
   pr_debug("gpio config: %p\n", gpio_pdata);
   res = platform_get_resource(dev, IORESOURCE_MEM, 0);
   chip = devm_kzalloc(&dev->dev, sizeof(*chip), GFP_KERNEL);
   if (!chip) return -ENOMEM;
   chip->dev = &dev->dev;
-  chip->reg_base = devm_request_and_ioremap(&dev->dev, res);
+  chip->reg_base = devm_ioremap_resource(&dev->dev, res);
+  if (IS_ERR(chip->reg_base)) return PTR_ERR(chip->reg_base);
   for (i = 0; i < GSTA_NR_BLOCKS; i++) {
     chip->regs[i] = chip->reg_base + i * 4096;
     /* disable all irqs */
     writel(0, &chip->regs[i]->rimsc);
     writel(0, &chip->regs[i]->fimsc);
     writel(~0, &chip->regs[i]->ic);
   }
   spin_lock_init(&chip->lock);
   gsta_gpio_setup(chip);
   if (gpio_pdata)
     for (i = 0; i < GSTA_NR_GPIO; i++)
       gsta_set_config(chip, i, gpio_pdata->pinconfig[i]);
   /* 384 was used in previous code: be compatible for other drivers */
   err = irq_alloc_descs(-1, 384, GSTA_NR_GPIO, NUMA_NO_NODE);
   if (err < 0) {
     dev_warn(&dev->dev, "sta2x11 gpio: Can't get irq base (%i)\n", -err);
     return err;
   }
   chip->irq_base = err;
   gsta_alloc_irq_chip(chip);
   err = request_irq(pdev->irq, gsta_gpio_handler, IRQF_SHARED, KBUILD_MODNAME, chip);
   if (err < 0) {
     dev_err(&dev->dev, "sta2x11 gpio: Can't request irq (%i)\n", -err);
     goto err_free_descs;
   }
   err = gpiochip_add(&chip->gpio);
   if (err < 0) {
     dev_err(&dev->dev, "sta2x11 gpio: Can't register (%i)\n", -err);
     goto err_free_irq;
   }
   platform_set_drvdata(dev, chip);
   return 0;
 err_free_irq:
   free_irq(pdev->irq, chip);
 err_free_descs:
   irq_free_descs(chip->irq_base, GSTA_NR_GPIO);
   return err;
 }<sep>@@
expression e,d,res; 
statement S1,S2;
@@
-	e = devm_request_and_ioremap(d, res);
+	e = devm_ioremap_resource(d, res);
+	if (IS_ERR(e)) return PTR_ERR(e);
	... when != if (e == NULL || ...) S1 else S2
            when != if (IS_ERR(e) || ...) S1 else S2<|end_of_text|> 
 | 188 
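From row 188 onward the rule changes target: devm_request_and_ioremap(), which reported failure with a bare NULL and left each driver to invent its own error path, is replaced by devm_ioremap_resource(), which logs the failure itself and returns an ERR_PTR value. The canonical probe-time shape, sketched with a hypothetical driver:

  #include <linux/platform_device.h>
  #include <linux/io.h>
  #include <linux/err.h>

  static int example_probe(struct platform_device *pdev)
  {
    struct resource *res;
    void __iomem *base;

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    base = devm_ioremap_resource(&pdev->dev, res);
    if (IS_ERR(base))
      return PTR_ERR(base); /* the helper already logged the reason */

    /* ... program registers through base ... */
    return 0;
  }

devm_ioremap_resource() also rejects a NULL res with -EINVAL, so the platform_get_resource() result needs no separate check.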
							 | 
					
	--- initial
+++ final
@@ -1,53 +1,54 @@
 static int gsta_probe(struct platform_device *dev) {
   int i, err;
   struct pci_dev *pdev;
   struct sta2x11_gpio_pdata *gpio_pdata;
   struct gsta_gpio *chip;
   struct resource *res;
   pdev = *(struct pci_dev **)(dev->dev.platform_data);
   gpio_pdata = dev_get_platdata(&pdev->dev);
   if (gpio_pdata == NULL) dev_err(&dev->dev, "no gpio config\n");
   pr_debug("gpio config: %p\n", gpio_pdata);
   res = platform_get_resource(dev, IORESOURCE_MEM, 0);
   chip = devm_kzalloc(&dev->dev, sizeof(*chip), GFP_KERNEL);
   if (!chip) return -ENOMEM;
   chip->dev = &dev->dev;
-  chip->reg_base = devm_request_and_ioremap(&dev->dev, res);
+  chip->reg_base = devm_ioremap_resource(&dev->dev, res);
+  if (IS_ERR(chip->reg_base)) return PTR_ERR(chip->reg_base);
   for (i = 0; i < GSTA_NR_BLOCKS; i++) {
     chip->regs[i] = chip->reg_base + i * 4096;
     /* disable all irqs */
     writel(0, &chip->regs[i]->rimsc);
     writel(0, &chip->regs[i]->fimsc);
     writel(~0, &chip->regs[i]->ic);
   }
   spin_lock_init(&chip->lock);
   gsta_gpio_setup(chip);
   if (gpio_pdata)
     for (i = 0; i < GSTA_NR_GPIO; i++)
       gsta_set_config(chip, i, gpio_pdata->pinconfig[i]);
   /* 384 was used in previous code: be compatible for other drivers */
   err = irq_alloc_descs(-1, 384, GSTA_NR_GPIO, NUMA_NO_NODE);
   if (err < 0) {
     dev_warn(&dev->dev, "sta2x11 gpio: Can't get irq base (%i)\n", -err);
     return err;
   }
   chip->irq_base = err;
   gsta_alloc_irq_chip(chip);
   err = request_irq(pdev->irq, gsta_gpio_handler, IRQF_SHARED, KBUILD_MODNAME, chip);
   if (err < 0) {
     dev_err(&dev->dev, "sta2x11 gpio: Can't request irq (%i)\n", -err);
     goto err_free_descs;
   }
   err = gpiochip_add(&chip->gpio);
   if (err < 0) {
     dev_err(&dev->dev, "sta2x11 gpio: Can't register (%i)\n", -err);
     goto err_free_irq;
   }
   platform_set_drvdata(dev, chip);
   return 0;
 err_free_irq:
   free_irq(pdev->irq, chip);
 err_free_descs:
   irq_free_descs(chip->irq_base, GSTA_NR_GPIO);
   return err;
 }<sep>@@
expression e,d,res; 
statement S1,S2;
@@
-	e = devm_request_and_ioremap(d, res);
+	e = devm_ioremap_resource(d, res);
+	if (IS_ERR(e)) return PTR_ERR(e);
	... when != if (e == NULL || ...) S1 else S2
            when != if (IS_ERR(e) || ...) S1 else S2<|end_of_text|> 
 | 189 
							 | 
					
	--- initial
+++ final
@@ -1,116 +1,116 @@
 static int c_can_plat_probe(struct platform_device *pdev) {
   int ret;
   void __iomem *addr;
   struct net_device *dev;
   struct c_can_priv *priv;
   const struct of_device_id *match;
   const struct platform_device_id *id;
   struct pinctrl *pinctrl;
   struct resource *mem, *res;
   int irq;
   struct clk *clk;
   if (pdev->dev.of_node) {
     match = of_match_device(c_can_of_table, &pdev->dev);
     if (!match) {
       dev_err(&pdev->dev, "Failed to find matching dt id\n");
       ret = -EINVAL;
       goto exit;
     }
     id = match->data;
   } else {
     id = platform_get_device_id(pdev);
   }
   pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
   if (IS_ERR(pinctrl)) dev_warn(&pdev->dev, "failed to configure pins from driver\n");
   /* get the appropriate clk */
   clk = clk_get(&pdev->dev, NULL);
   if (IS_ERR(clk)) {
     dev_err(&pdev->dev, "no clock defined\n");
     ret = -ENODEV;
     goto exit;
   }
   /* get the platform data */
   mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
   irq = platform_get_irq(pdev, 0);
   if (!mem || irq <= 0) {
     ret = -ENODEV;
     goto exit_free_clk;
   }
   if (!request_mem_region(mem->start, resource_size(mem), KBUILD_MODNAME)) {
     dev_err(&pdev->dev, "resource unavailable\n");
     ret = -ENODEV;
     goto exit_free_clk;
   }
   addr = ioremap(mem->start, resource_size(mem));
   if (!addr) {
     dev_err(&pdev->dev, "failed to map can port\n");
     ret = -ENOMEM;
     goto exit_release_mem;
   }
   /* allocate the c_can device */
   dev = alloc_c_can_dev();
   if (!dev) {
     ret = -ENOMEM;
     goto exit_iounmap;
   }
   priv = netdev_priv(dev);
   switch (id->driver_data) {
   case BOSCH_C_CAN:
     priv->regs = reg_map_c_can;
     switch (mem->flags & IORESOURCE_MEM_TYPE_MASK) {
     case IORESOURCE_MEM_32BIT:
       priv->read_reg = c_can_plat_read_reg_aligned_to_32bit;
       priv->write_reg = c_can_plat_write_reg_aligned_to_32bit;
       break;
     case IORESOURCE_MEM_16BIT:
     default:
       priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
       priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
       break;
     }
     break;
   case BOSCH_D_CAN:
     priv->regs = reg_map_d_can;
     priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
     priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
     priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
     if (pdev->dev.of_node)
       priv->instance = of_alias_get_id(pdev->dev.of_node, "d_can");
     else
       priv->instance = pdev->id;
     res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-    priv->raminit_ctrlreg = devm_request_and_ioremap(&pdev->dev, res);
-    if (!priv->raminit_ctrlreg || priv->instance < 0)
+    priv->raminit_ctrlreg = devm_ioremap_resource(&pdev->dev, res);
+    if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0)
       dev_info(&pdev->dev, "control memory is not used for raminit\n");
     else
       priv->raminit = c_can_hw_raminit;
     break;
   default: ret = -EINVAL; goto exit_free_device;
   }
   dev->irq = irq;
   priv->base = addr;
   priv->device = &pdev->dev;
   priv->can.clock.freq = clk_get_rate(clk);
   priv->priv = clk;
   priv->type = id->driver_data;
   platform_set_drvdata(pdev, dev);
   SET_NETDEV_DEV(dev, &pdev->dev);
   ret = register_c_can_dev(dev);
   if (ret) {
     dev_err(&pdev->dev, "registering %s failed (err=%d)\n", KBUILD_MODNAME, ret);
     goto exit_free_device;
   }
   dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n", KBUILD_MODNAME, priv->base, dev->irq);
   return 0;
 exit_free_device:
   free_c_can_dev(dev);
 exit_iounmap:
   iounmap(addr);
 exit_release_mem:
   release_mem_region(mem->start, resource_size(mem));
 exit_free_clk:
   clk_put(clk);
 exit:
   dev_err(&pdev->dev, "probe failed\n");
   return ret;
 }<sep>@@
expression e,d,res;
statement S1,S2; 
@@
-	e = devm_request_and_ioremap(d, res);
+	e = devm_ioremap_resource(d, res);
	if (
-	    e == NULL
+	    IS_ERR(e)
	    || ...)
	S1 else S2
<|end_of_text|> 
 | 190 
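The c_can rows use a second variant of the rule: the raminit window is optional, so instead of inserting an early return the patch only rewrites the failure test inside the existing if/else from e == NULL to IS_ERR(e), and the SmPL || ... ellipsis preserves the unrelated priv->instance < 0 disjunct. Failure then merely selects the dev_info() fallback rather than aborting the probe. One caveat follows from the conversion: priv->raminit_ctrlreg may now hold an ERR_PTR value instead of NULL, so any later NULL test on that field would need auditing.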
							 | 
					
	--- initial
+++ final
@@ -1,116 +1,116 @@
 static int c_can_plat_probe(struct platform_device *pdev) {
   int ret;
   void __iomem *addr;
   struct net_device *dev;
   struct c_can_priv *priv;
   const struct of_device_id *match;
   const struct platform_device_id *id;
   struct pinctrl *pinctrl;
   struct resource *mem, *res;
   int irq;
   struct clk *clk;
   if (pdev->dev.of_node) {
     match = of_match_device(c_can_of_table, &pdev->dev);
     if (!match) {
       dev_err(&pdev->dev, "Failed to find matching dt id\n");
       ret = -EINVAL;
       goto exit;
     }
     id = match->data;
   } else {
     id = platform_get_device_id(pdev);
   }
   pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
   if (IS_ERR(pinctrl)) dev_warn(&pdev->dev, "failed to configure pins from driver\n");
   /* get the appropriate clk */
   clk = clk_get(&pdev->dev, NULL);
   if (IS_ERR(clk)) {
     dev_err(&pdev->dev, "no clock defined\n");
     ret = -ENODEV;
     goto exit;
   }
   /* get the platform data */
   mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
   irq = platform_get_irq(pdev, 0);
   if (!mem || irq <= 0) {
     ret = -ENODEV;
     goto exit_free_clk;
   }
   if (!request_mem_region(mem->start, resource_size(mem), KBUILD_MODNAME)) {
     dev_err(&pdev->dev, "resource unavailable\n");
     ret = -ENODEV;
     goto exit_free_clk;
   }
   addr = ioremap(mem->start, resource_size(mem));
   if (!addr) {
     dev_err(&pdev->dev, "failed to map can port\n");
     ret = -ENOMEM;
     goto exit_release_mem;
   }
   /* allocate the c_can device */
   dev = alloc_c_can_dev();
   if (!dev) {
     ret = -ENOMEM;
     goto exit_iounmap;
   }
   priv = netdev_priv(dev);
   switch (id->driver_data) {
   case BOSCH_C_CAN:
     priv->regs = reg_map_c_can;
     switch (mem->flags & IORESOURCE_MEM_TYPE_MASK) {
     case IORESOURCE_MEM_32BIT:
       priv->read_reg = c_can_plat_read_reg_aligned_to_32bit;
       priv->write_reg = c_can_plat_write_reg_aligned_to_32bit;
       break;
     case IORESOURCE_MEM_16BIT:
     default:
       priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
       priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
       break;
     }
     break;
   case BOSCH_D_CAN:
     priv->regs = reg_map_d_can;
     priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
     priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
     priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
     if (pdev->dev.of_node)
       priv->instance = of_alias_get_id(pdev->dev.of_node, "d_can");
     else
       priv->instance = pdev->id;
     res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-    priv->raminit_ctrlreg = devm_request_and_ioremap(&pdev->dev, res);
-    if (!priv->raminit_ctrlreg || priv->instance < 0)
+    priv->raminit_ctrlreg = devm_ioremap_resource(&pdev->dev, res);
+    if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0)
       dev_info(&pdev->dev, "control memory is not used for raminit\n");
     else
       priv->raminit = c_can_hw_raminit;
     break;
   default: ret = -EINVAL; goto exit_free_device;
   }
   dev->irq = irq;
   priv->base = addr;
   priv->device = &pdev->dev;
   priv->can.clock.freq = clk_get_rate(clk);
   priv->priv = clk;
   priv->type = id->driver_data;
   platform_set_drvdata(pdev, dev);
   SET_NETDEV_DEV(dev, &pdev->dev);
   ret = register_c_can_dev(dev);
   if (ret) {
     dev_err(&pdev->dev, "registering %s failed (err=%d)\n", KBUILD_MODNAME, ret);
     goto exit_free_device;
   }
   dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n", KBUILD_MODNAME, priv->base, dev->irq);
   return 0;
 exit_free_device:
   free_c_can_dev(dev);
 exit_iounmap:
   iounmap(addr);
 exit_release_mem:
   release_mem_region(mem->start, resource_size(mem));
 exit_free_clk:
   clk_put(clk);
 exit:
   dev_err(&pdev->dev, "probe failed\n");
   return ret;
 }<sep>@@
expression e,d,res;
statement S1,S2; 
@@
-	e = devm_request_and_ioremap(d, res);
+	e = devm_ioremap_resource(d, res);
	if (
-	    e == NULL
+	    IS_ERR(e)
	    || ...)
	S1 else S2
<|end_of_text|> 
 | 191 
							 | 
					
	--- initial
+++ final
@@ -1,7 +1,7 @@
 static void __iomem *__init mvebu_pcie_map_registers(struct platform_device *pdev, struct device_node *np, struct mvebu_pcie_port *port) {
   struct resource regs;
   int ret = 0;
  ret = of_address_to_resource(np, 0, &regs);
  if (ret) return NULL;
-  return devm_request_and_ioremap(&pdev->dev, &regs);
+  return devm_ioremap_resource(&pdev->dev, &regs);
 }<sep>@@
expression d,res;
@@
	return
-		devm_request_and_ioremap(d, res);
+		devm_ioremap_resource(d, res);
<|end_of_text|> 
 | 192 
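mvebu_pcie_map_registers() shows the tail-call form of the rule. Note the resulting mixed failure contract: the helper still returns NULL when of_address_to_resource() fails, but an ERR_PTR value when the mapping fails, so a caller would hypothetically have to check both conventions:

  base = mvebu_pcie_map_registers(pdev, np, port);
  if (base == NULL)      /* of_address_to_resource() failed */
    return -ENOMEM;
  if (IS_ERR(base))      /* devm_ioremap_resource() failed */
    return PTR_ERR(base);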
							 | 
					
	--- initial
+++ final
@@ -1,7 +1,7 @@
 static void __iomem *__init mvebu_pcie_map_registers(struct platform_device *pdev, struct device_node *np, struct mvebu_pcie_port *port) {
   struct resource regs;
   int ret = 0;
  ret = of_address_to_resource(np, 0, &regs);
  if (ret) return NULL;
-  return devm_request_and_ioremap(&pdev->dev, &regs);
+  return devm_ioremap_resource(&pdev->dev, &regs);
 }<sep>@@
expression d,res;
@@
	return
-		devm_request_and_ioremap(d, res);
+		devm_ioremap_resource(d, res);
<|end_of_text|> 
 | 193 
							 | 
					
	--- initial
+++ final
@@ -1,77 +1,74 @@
 static int pxa3xx_gcu_probe(struct platform_device *pdev) {
   int i, ret, irq;
   struct resource *r;
   struct pxa3xx_gcu_priv *priv;
   struct device *dev = &pdev->dev;
   priv = devm_kzalloc(dev, sizeof(struct pxa3xx_gcu_priv), GFP_KERNEL);
   if (!priv) return -ENOMEM;
   init_waitqueue_head(&priv->wait_idle);
   init_waitqueue_head(&priv->wait_free);
   spin_lock_init(&priv->spinlock);
   /* we allocate the misc device structure as part of our own allocation,
    * so we can get a pointer to our priv structure later on with
    * container_of(). This isn't really necessary as we have a fixed minor
    * number anyway, but this is to avoid statics. */
   priv->misc_dev.minor = MISCDEV_MINOR, priv->misc_dev.name = DRV_NAME, priv->misc_dev.fops = &pxa3xx_gcu_miscdev_fops;
   /* handle IO resources */
   r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-  priv->mmio_base = devm_request_and_ioremap(dev, r);
-  if (IS_ERR(priv->mmio_base)) {
-    dev_err(dev, "failed to map I/O memory\n");
-    return PTR_ERR(priv->mmio_base);
-  }
+  priv->mmio_base = devm_ioremap_resource(dev, r);
+  if (IS_ERR(priv->mmio_base)) return PTR_ERR(priv->mmio_base);
   /* enable the clock */
   priv->clk = devm_clk_get(dev, NULL);
   if (IS_ERR(priv->clk)) {
     dev_err(dev, "failed to get clock\n");
     return PTR_ERR(priv->clk);
   }
   /* request the IRQ */
   irq = platform_get_irq(pdev, 0);
   if (irq < 0) {
     dev_err(dev, "no IRQ defined\n");
     return -ENODEV;
   }
   ret = devm_request_irq(dev, irq, pxa3xx_gcu_handle_irq, 0, DRV_NAME, priv);
   if (ret < 0) {
     dev_err(dev, "request_irq failed\n");
     return ret;
   }
   /* allocate dma memory */
   priv->shared = dma_alloc_coherent(dev, SHARED_SIZE, &priv->shared_phys, GFP_KERNEL);
   if (!priv->shared) {
     dev_err(dev, "failed to allocate DMA memory\n");
     return -ENOMEM;
   }
   /* register misc device */
   ret = misc_register(&priv->misc_dev);
   if (ret < 0) {
     dev_err(dev, "misc_register() for minor %d failed\n", MISCDEV_MINOR);
     goto err_free_dma;
   }
   ret = clk_enable(priv->clk);
   if (ret < 0) {
     dev_err(dev, "failed to enable clock\n");
     goto err_misc_deregister;
   }
   for (i = 0; i < 8; i++) {
     ret = pxa3xx_gcu_add_buffer(dev, priv);
     if (ret) {
       dev_err(dev, "failed to allocate DMA memory\n");
       goto err_disable_clk;
     }
   }
   platform_set_drvdata(pdev, priv);
   priv->resource_mem = r;
   pxa3xx_gcu_reset(priv);
   pxa3xx_gcu_init_debug_timer();
   dev_info(dev, "registered @0x%p, DMA 0x%p (%d bytes), IRQ %d\n", (void *)r->start, (void *)priv->shared_phys, SHARED_SIZE, irq);
   return 0;
 err_free_dma:
   dma_free_coherent(dev, SHARED_SIZE, priv->shared, priv->shared_phys);
 err_misc_deregister:
   misc_deregister(&priv->misc_dev);
 err_disable_clk:
   clk_disable(priv->clk);
   return ret;
 }<sep>@@
expression e,d,res;
statement S,S1;
@@
-	e = devm_request_and_ioremap(d, res);
+	e = devm_ioremap_resource(d, res);
	if (IS_ERR(e))
-	{
- 		dev_err(...);
		S
-	}
	else S1
<|end_of_text|> 
 | 200 
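The pxa3xx row (and the nbu2ss rows below) uses a third SmPL variant that additionally deletes the dev_err() inside the IS_ERR branch: devm_ioremap_resource() already prints a diagnostic naming the device and resource, so a second message at the call site is redundant. With only the return statement left, the braces around the branch are dropped as well, per kernel coding style.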
							 | 
					
	--- initial
+++ final
@@ -1,77 +1,74 @@
 static int pxa3xx_gcu_probe(struct platform_device *pdev) {
   int i, ret, irq;
   struct resource *r;
   struct pxa3xx_gcu_priv *priv;
   struct device *dev = &pdev->dev;
   priv = devm_kzalloc(dev, sizeof(struct pxa3xx_gcu_priv), GFP_KERNEL);
   if (!priv) return -ENOMEM;
   init_waitqueue_head(&priv->wait_idle);
   init_waitqueue_head(&priv->wait_free);
   spin_lock_init(&priv->spinlock);
   /* we allocate the misc device structure as part of our own allocation,
    * so we can get a pointer to our priv structure later on with
    * container_of(). This isn't really necessary as we have a fixed minor
    * number anyway, but this is to avoid statics. */
   priv->misc_dev.minor = MISCDEV_MINOR, priv->misc_dev.name = DRV_NAME, priv->misc_dev.fops = &pxa3xx_gcu_miscdev_fops;
   /* handle IO resources */
   r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-  priv->mmio_base = devm_request_and_ioremap(dev, r);
-  if (IS_ERR(priv->mmio_base)) {
-    dev_err(dev, "failed to map I/O memory\n");
-    return PTR_ERR(priv->mmio_base);
-  }
+  priv->mmio_base = devm_ioremap_resource(dev, r);
+  if (IS_ERR(priv->mmio_base)) return PTR_ERR(priv->mmio_base);
   /* enable the clock */
   priv->clk = devm_clk_get(dev, NULL);
   if (IS_ERR(priv->clk)) {
     dev_err(dev, "failed to get clock\n");
     return PTR_ERR(priv->clk);
   }
   /* request the IRQ */
   irq = platform_get_irq(pdev, 0);
   if (irq < 0) {
     dev_err(dev, "no IRQ defined\n");
     return -ENODEV;
   }
   ret = devm_request_irq(dev, irq, pxa3xx_gcu_handle_irq, 0, DRV_NAME, priv);
   if (ret < 0) {
     dev_err(dev, "request_irq failed\n");
     return ret;
   }
   /* allocate dma memory */
   priv->shared = dma_alloc_coherent(dev, SHARED_SIZE, &priv->shared_phys, GFP_KERNEL);
   if (!priv->shared) {
     dev_err(dev, "failed to allocate DMA memory\n");
     return -ENOMEM;
   }
   /* register misc device */
   ret = misc_register(&priv->misc_dev);
   if (ret < 0) {
     dev_err(dev, "misc_register() for minor %d failed\n", MISCDEV_MINOR);
     goto err_free_dma;
   }
   ret = clk_enable(priv->clk);
   if (ret < 0) {
     dev_err(dev, "failed to enable clock\n");
     goto err_misc_deregister;
   }
   for (i = 0; i < 8; i++) {
     ret = pxa3xx_gcu_add_buffer(dev, priv);
     if (ret) {
       dev_err(dev, "failed to allocate DMA memory\n");
       goto err_disable_clk;
     }
   }
   platform_set_drvdata(pdev, priv);
   priv->resource_mem = r;
   pxa3xx_gcu_reset(priv);
   pxa3xx_gcu_init_debug_timer();
   dev_info(dev, "registered @0x%p, DMA 0x%p (%d bytes), IRQ %d\n", (void *)r->start, (void *)priv->shared_phys, SHARED_SIZE, irq);
   return 0;
 err_free_dma:
   dma_free_coherent(dev, SHARED_SIZE, priv->shared, priv->shared_phys);
 err_misc_deregister:
   misc_deregister(&priv->misc_dev);
 err_disable_clk:
   clk_disable(priv->clk);
   return ret;
 }<sep>@@
expression e,d,res;
statement S,S1;
@@
-	e = devm_request_and_ioremap(d, res);
+	e = devm_ioremap_resource(d, res);
	if (IS_ERR(e))
-	{
- 		dev_err(...);
		S
-	}
	else S1
<|end_of_text|> 
 | 201 
							 | 
					
	--- initial
+++ final
@@ -1,46 +1,43 @@
 static int nbu2ss_drv_probe(struct platform_device *pdev) {
   int status = -ENODEV;
   struct nbu2ss_udc *udc;
   struct resource *r;
   int irq;
   void __iomem *mmio_base;
   udc = &udc_controller;
   memset(udc, 0, sizeof(struct nbu2ss_udc));
   platform_set_drvdata(pdev, udc);
   /* require I/O memory and IRQ to be provided as resources */
   r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-  mmio_base = devm_request_and_ioremap(&pdev->dev, r);
-  if (IS_ERR(mmio_base)) {
-    dev_err(&pdev->dev, "failed to map I/O memory\n");
-    return PTR_ERR(mmio_base);
-  }
+  mmio_base = devm_ioremap_resource(&pdev->dev, r);
+  if (IS_ERR(mmio_base)) return PTR_ERR(mmio_base);
   irq = platform_get_irq(pdev, 0);
   if (irq < 0) {
     dev_err(&pdev->dev, "failed to get IRQ\n");
     return irq;
   }
   status = devm_request_irq(&pdev->dev, irq, _nbu2ss_udc_irq, 0, driver_name, udc);
   /* IO Memory */
   udc->p_regs = (PT_FC_REGS)mmio_base;
   /* USB Function Controller Interrupt */
   if (status != 0) {
     ERR("request_irq(USB_UDC_IRQ_1) failed\n");
     goto cleanup1;
   }
   /* Driver Initialization */
   status = nbu2ss_drv_contest_init(pdev, udc);
   if (status < 0) {
     /* Error */
     goto cleanup1;
   }
   /* VBUS Interrupt */
   irq_set_irq_type(INT_VBUS, IRQ_TYPE_EDGE_BOTH);
   status = request_irq(INT_VBUS, _nbu2ss_vbus_irq, IRQF_SHARED, driver_name, udc);
   if (status != 0) {
     ERR("request_irq(INT_VBUS) failed\n");
     goto cleanup1;
   }
   return status;
 cleanup1:
   return status;
 }<sep>@@
expression e,d,res;
statement S,S1;
@@
-	e = devm_request_and_ioremap(d, res);
+	e = devm_ioremap_resource(d, res);
	if (IS_ERR(e))
-	{
- 		dev_err(...);
		S
-	}
	else S1
<|end_of_text|> 
 | 204 
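The nbu2ss row is also a quiet bug fix: the original code tested the devm_request_and_ioremap() result with IS_ERR(), but that helper reported failure by returning NULL, so the check could never fire and a failed mapping would have been dereferenced later. Only after the conversion does the IS_ERR() test match what the mapping helper actually returns.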
							 | 
					
	--- initial
+++ final
@@ -1,46 +1,43 @@
 static int nbu2ss_drv_probe(struct platform_device *pdev) {
   int status = -ENODEV;
   struct nbu2ss_udc *udc;
   struct resource *r;
   int irq;
   void __iomem *mmio_base;
   udc = &udc_controller;
   memset(udc, 0, sizeof(struct nbu2ss_udc));
   platform_set_drvdata(pdev, udc);
   /* require I/O memory and IRQ to be provided as resources */
   r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-  mmio_base = devm_request_and_ioremap(&pdev->dev, r);
-  if (IS_ERR(mmio_base)) {
-    dev_err(&pdev->dev, "failed to map I/O memory\n");
-    return PTR_ERR(mmio_base);
-  }
+  mmio_base = devm_ioremap_resource(&pdev->dev, r);
+  if (IS_ERR(mmio_base)) return PTR_ERR(mmio_base);
   irq = platform_get_irq(pdev, 0);
   if (irq < 0) {
     dev_err(&pdev->dev, "failed to get IRQ\n");
     return irq;
   }
   status = devm_request_irq(&pdev->dev, irq, _nbu2ss_udc_irq, 0, driver_name, udc);
   /* IO Memory */
   udc->p_regs = (PT_FC_REGS)mmio_base;
   /* USB Function Controller Interrupt */
   if (status != 0) {
     ERR("request_irq(USB_UDC_IRQ_1) failed\n");
     goto cleanup1;
   }
   /* Driver Initialization */
   status = nbu2ss_drv_contest_init(pdev, udc);
   if (status < 0) {
     /* Error */
     goto cleanup1;
   }
   /* VBUS Interrupt */
   irq_set_irq_type(INT_VBUS, IRQ_TYPE_EDGE_BOTH);
   status = request_irq(INT_VBUS, _nbu2ss_vbus_irq, IRQF_SHARED, driver_name, udc);
   if (status != 0) {
     ERR("request_irq(INT_VBUS) failed\n");
     goto cleanup1;
   }
   return status;
 cleanup1:
   return status;
 }<sep>@@
expression e,d,res;
statement S,S1;
@@
-	e = devm_request_and_ioremap(d, res);
+	e = devm_ioremap_resource(d, res);
	if (IS_ERR(e))
-	{
- 		dev_err(...);
		S
-	}
	else S1
<|end_of_text|> 
 | 205 
							 | 
					
	--- initial
+++ final
@@ -1,64 +1,64 @@
 int xfs_attr_rmtval_remove(struct xfs_da_args *args) {
   struct xfs_mount *mp = args->dp->i_mount;
   xfs_dablk_t lblkno;
   int blkcnt;
   int error;
   int done;
   trace_xfs_attr_rmtval_remove(args);
   /*
    * Roll through the "value", invalidating the attribute value's blocks.
    */
   lblkno = args->rmtblkno;
   blkcnt = args->rmtblkcnt;
   while (blkcnt > 0) {
     struct xfs_bmbt_irec map;
     struct xfs_buf *bp;
     xfs_daddr_t dblkno;
     int dblkcnt;
     int nmap;
     /*
      * Try to remember where we decided to put the value.
      */
     nmap = 1;
     error = xfs_bmapi_read(args->dp, (xfs_fileoff_t)lblkno, blkcnt, &map, &nmap, XFS_BMAPI_ATTRFORK);
     if (error) return error;
     ASSERT(nmap == 1);
     ASSERT((map.br_startblock != DELAYSTARTBLOCK) && (map.br_startblock != HOLESTARTBLOCK));
     dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock), dblkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
     /*
      * If the "remote" value is in the cache, remove it.
      */
     bp = xfs_buf_incore(mp->m_ddev_targp, dblkno, dblkcnt, XBF_TRYLOCK);
     if (bp) {
       xfs_buf_stale(bp);
       xfs_buf_relse(bp);
       bp = NULL;
     }
     lblkno += map.br_blockcount;
     blkcnt -= map.br_blockcount;
   }
   /*
    * Keep de-allocating extents until the remote-value region is gone.
    */
   lblkno = args->rmtblkno;
   blkcnt = args->rmtblkcnt;
   done = 0;
   while (!done) {
-    xfs_defer_init(args->trans->t_dfops, args->firstblock);
+    xfs_defer_init(NULL, args->trans->t_dfops, args->firstblock);
     error = xfs_bunmapi(args->trans, args->dp, lblkno, blkcnt, XFS_BMAPI_ATTRFORK, 1, args->firstblock, &done);
     if (error) goto out_defer_cancel;
     xfs_defer_ijoin(args->trans->t_dfops, args->dp);
     error = xfs_defer_finish(&args->trans, args->trans->t_dfops);
     if (error) goto out_defer_cancel;
     /*
      * Close out trans and start the next one in the chain.
      */
     error = xfs_trans_roll_inode(&args->trans, args->dp);
     if (error) return error;
   }
   return 0;
 out_defer_cancel:
   xfs_defer_cancel(args->trans->t_dfops);
   args->trans = NULL;
   return error;
 }<sep>@@
expression e1,e2,tp;
@@
-		xfs_defer_init(e1, e2);
+		xfs_defer_init(NULL, e1, e2);
<|end_of_text|> 
 | 206 
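Row 206 begins a different series: xfs_defer_init() gains a struct xfs_trans * as a new first argument. Interior helpers such as xfs_attr_rmtval_remove() re-initialize a dfops that is already attached to the transaction, so they pass NULL and keep reaching it through args->trans->t_dfops; the top-level entry points in rows 212-213 pass the transaction so the init can attach the dfops itself. The two call shapes, condensed from the rows (not a complete API description):

  /* helper: the dfops already hangs off the transaction */
  xfs_defer_init(NULL, args->trans->t_dfops, args->firstblock);

  /* entry point: attach a fresh dfops to the transaction;
   * replaces xfs_defer_init(&dfops, &firstblock) followed by
   * args.trans->t_dfops = &dfops */
  xfs_defer_init(args.trans, &dfops, &firstblock);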
							 | 
					
	--- initial
+++ final
@@ -1,96 +1,96 @@
 int xfs_attr_rmtval_set(struct xfs_da_args *args) {
   struct xfs_inode *dp = args->dp;
   struct xfs_mount *mp = dp->i_mount;
   struct xfs_bmbt_irec map;
   xfs_dablk_t lblkno;
   xfs_fileoff_t lfileoff = 0;
   uint8_t *src = args->value;
   int blkcnt;
   int valuelen;
   int nmap;
   int error;
   int offset = 0;
   trace_xfs_attr_rmtval_set(args);
   /*
    * Find a "hole" in the attribute address space large enough for
    * us to drop the new attribute's value into. Because CRC enable
    * attributes have headers, we can't just do a straight byte to FSB
    * conversion and have to take the header space into account.
    */
   blkcnt = xfs_attr3_rmt_blocks(mp, args->rmtvaluelen);
   error = xfs_bmap_first_unused(args->trans, args->dp, blkcnt, &lfileoff, XFS_ATTR_FORK);
   if (error) return error;
   args->rmtblkno = lblkno = (xfs_dablk_t)lfileoff;
   args->rmtblkcnt = blkcnt;
   /*
    * Roll through the "value", allocating blocks on disk as required.
    */
   while (blkcnt > 0) {
     /*
      * Allocate a single extent, up to the size of the value.
      *
      * Note that we have to consider this a data allocation as we
      * write the remote attribute without logging the contents.
      * Hence we must ensure that we aren't using blocks that are on
      * the busy list so that we don't overwrite blocks which have
      * recently been freed but their transactions are not yet
      * committed to disk. If we overwrite the contents of a busy
      * extent and then crash then the block may not contain the
      * correct metadata after log recovery occurs.
      */
-    xfs_defer_init(args->trans->t_dfops, args->firstblock);
+    xfs_defer_init(NULL, args->trans->t_dfops, args->firstblock);
     nmap = 1;
     error = xfs_bmapi_write(args->trans, dp, (xfs_fileoff_t)lblkno, blkcnt, XFS_BMAPI_ATTRFORK, args->firstblock, args->total, &map, &nmap);
     if (error) goto out_defer_cancel;
     xfs_defer_ijoin(args->trans->t_dfops, dp);
     error = xfs_defer_finish(&args->trans, args->trans->t_dfops);
     if (error) goto out_defer_cancel;
     ASSERT(nmap == 1);
     ASSERT((map.br_startblock != DELAYSTARTBLOCK) && (map.br_startblock != HOLESTARTBLOCK));
     lblkno += map.br_blockcount;
     blkcnt -= map.br_blockcount;
     /*
      * Start the next trans in the chain.
      */
     error = xfs_trans_roll_inode(&args->trans, dp);
     if (error) return error;
   }
   /*
    * Roll through the "value", copying the attribute value to the
    * already-allocated blocks.  Blocks are written synchronously
    * so that we can know they are all on disk before we turn off
    * the INCOMPLETE flag.
    */
   lblkno = args->rmtblkno;
   blkcnt = args->rmtblkcnt;
   valuelen = args->rmtvaluelen;
   while (valuelen > 0) {
     struct xfs_buf *bp;
     xfs_daddr_t dblkno;
     int dblkcnt;
     ASSERT(blkcnt > 0);
-    xfs_defer_init(args->trans->t_dfops, args->firstblock);
+    xfs_defer_init(NULL, args->trans->t_dfops, args->firstblock);
     nmap = 1;
     error = xfs_bmapi_read(dp, (xfs_fileoff_t)lblkno, blkcnt, &map, &nmap, XFS_BMAPI_ATTRFORK);
     if (error) return error;
     ASSERT(nmap == 1);
     ASSERT((map.br_startblock != DELAYSTARTBLOCK) && (map.br_startblock != HOLESTARTBLOCK));
     dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock), dblkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
     bp = xfs_buf_get(mp->m_ddev_targp, dblkno, dblkcnt, 0);
     if (!bp) return -ENOMEM;
     bp->b_ops = &xfs_attr3_rmt_buf_ops;
     xfs_attr_rmtval_copyin(mp, bp, args->dp->i_ino, &offset, &valuelen, &src);
     error = xfs_bwrite(bp); /* GROT: NOTE: synchronous write */
     xfs_buf_relse(bp);
     if (error) return error;
     /* roll attribute extent map forwards */
     lblkno += map.br_blockcount;
     blkcnt -= map.br_blockcount;
   }
   ASSERT(valuelen == 0);
   return 0;
 out_defer_cancel:
   xfs_defer_cancel(args->trans->t_dfops);
   args->trans = NULL;
   return error;
 }<sep>@@
expression e1,e2,tp;
@@
-		xfs_defer_init(e1, e2);
+		xfs_defer_init(NULL, e1, e2);
<|end_of_text|> 
 | 207 
							 | 
					
	--- initial
+++ final
@@ -1,147 +1,147 @@
 STATIC int xfs_attr_leaf_addname(struct xfs_da_args *args) {
   struct xfs_inode *dp;
   struct xfs_buf *bp;
   int retval, error, forkoff;
   trace_xfs_attr_leaf_addname(args);
   /*
    * Read the (only) block in the attribute list in.
    */
   dp = args->dp;
   args->blkno = 0;
   error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp);
   if (error) return error;
   /*
    * Look up the given attribute in the leaf block.  Figure out if
    * the given flags produce an error or call for an atomic rename.
    */
   retval = xfs_attr3_leaf_lookup_int(bp, args);
   if ((args->flags & ATTR_REPLACE) && (retval == -ENOATTR)) {
     xfs_trans_brelse(args->trans, bp);
     return retval;
   } else if (retval == -EEXIST) {
     if (args->flags & ATTR_CREATE) { /* pure create op */
       xfs_trans_brelse(args->trans, bp);
       return retval;
     }
     trace_xfs_attr_leaf_replace(args);
     /* save the attribute state for later removal*/
     args->op_flags |= XFS_DA_OP_RENAME; /* an atomic rename */
     args->blkno2 = args->blkno;         /* set 2nd entry info*/
     args->index2 = args->index;
     args->rmtblkno2 = args->rmtblkno;
     args->rmtblkcnt2 = args->rmtblkcnt;
     args->rmtvaluelen2 = args->rmtvaluelen;
     /*
      * clear the remote attr state now that it is saved so that the
      * values reflect the state of the attribute we are about to
      * add, not the attribute we just found and will remove later.
      */
     args->rmtblkno = 0;
     args->rmtblkcnt = 0;
     args->rmtvaluelen = 0;
   }
   /*
    * Add the attribute to the leaf block, transitioning to a Btree
    * if required.
    */
   retval = xfs_attr3_leaf_add(bp, args);
   if (retval == -ENOSPC) {
     /*
      * Promote the attribute list to the Btree format, then
      * Commit that transaction so that the node_addname() call
      * can manage its own transactions.
      */
-    xfs_defer_init(args->trans->t_dfops, args->firstblock);
+    xfs_defer_init(NULL, args->trans->t_dfops, args->firstblock);
     error = xfs_attr3_leaf_to_node(args);
     if (error) goto out_defer_cancel;
     xfs_defer_ijoin(args->trans->t_dfops, dp);
     error = xfs_defer_finish(&args->trans, args->trans->t_dfops);
     if (error) goto out_defer_cancel;
     /*
      * Commit the current trans (including the inode) and start
      * a new one.
      */
     error = xfs_trans_roll_inode(&args->trans, dp);
     if (error) return error;
     /*
      * Fob the whole rest of the problem off on the Btree code.
      */
     error = xfs_attr_node_addname(args);
     return error;
   }
   /*
    * Commit the transaction that added the attr name so that
    * later routines can manage their own transactions.
    */
   error = xfs_trans_roll_inode(&args->trans, dp);
   if (error) return error;
   /*
    * If there was an out-of-line value, allocate the blocks we
    * identified for its storage and copy the value.  This is done
    * after we create the attribute so that we don't overflow the
    * maximum size of a transaction and/or hit a deadlock.
    */
   if (args->rmtblkno > 0) {
     error = xfs_attr_rmtval_set(args);
     if (error) return error;
   }
   /*
    * If this is an atomic rename operation, we must "flip" the
    * incomplete flags on the "new" and "old" attribute/value pairs
    * so that one disappears and one appears atomically.  Then we
    * must remove the "old" attribute/value pair.
    */
   if (args->op_flags & XFS_DA_OP_RENAME) {
     /*
      * In a separate transaction, set the incomplete flag on the
      * "old" attr and clear the incomplete flag on the "new" attr.
      */
     error = xfs_attr3_leaf_flipflags(args);
     if (error) return error;
     /*
      * Dismantle the "old" attribute/value pair by removing
      * a "remote" value (if it exists).
      */
     args->index = args->index2;
     args->blkno = args->blkno2;
     args->rmtblkno = args->rmtblkno2;
     args->rmtblkcnt = args->rmtblkcnt2;
     args->rmtvaluelen = args->rmtvaluelen2;
     if (args->rmtblkno) {
       error = xfs_attr_rmtval_remove(args);
       if (error) return error;
     }
     /*
      * Read in the block containing the "old" attr, then
      * remove the "old" attr from that block (neat, huh!)
      */
     error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp);
     if (error) return error;
     xfs_attr3_leaf_remove(bp, args);
     /*
      * If the result is small enough, shrink it all into the inode.
      */
     if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) {
-      xfs_defer_init(args->trans->t_dfops, args->firstblock);
+      xfs_defer_init(NULL, args->trans->t_dfops, args->firstblock);
       error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
       /* bp is gone due to xfs_da_shrink_inode */
       if (error) goto out_defer_cancel;
       xfs_defer_ijoin(args->trans->t_dfops, dp);
       error = xfs_defer_finish(&args->trans, args->trans->t_dfops);
       if (error) goto out_defer_cancel;
     }
     /*
      * Commit the remove and start the next trans in series.
      */
     error = xfs_trans_roll_inode(&args->trans, dp);
   } else if (args->rmtblkno > 0) {
     /*
      * Added a "remote" value, just clear the incomplete flag.
      */
     error = xfs_attr3_leaf_clearflag(args);
   }
   return error;
 out_defer_cancel:
   xfs_defer_cancel(args->trans->t_dfops);
   return error;
 }<sep>@@
expression e1,e2,tp;
@@
-		xfs_defer_init(e1, e2);
+		xfs_defer_init(NULL, e1, e2);
<|end_of_text|> 
 | 208 
							 | 
					
	--- initial
+++ final
@@ -1,35 +1,35 @@
 STATIC int xfs_attr_leaf_removename(struct xfs_da_args *args) {
   struct xfs_inode *dp;
   struct xfs_buf *bp;
   int error, forkoff;
   trace_xfs_attr_leaf_removename(args);
   /*
    * Remove the attribute.
    */
   dp = args->dp;
   args->blkno = 0;
   error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp);
   if (error) return error;
   error = xfs_attr3_leaf_lookup_int(bp, args);
   if (error == -ENOATTR) {
     xfs_trans_brelse(args->trans, bp);
     return error;
   }
   xfs_attr3_leaf_remove(bp, args);
   /*
    * If the result is small enough, shrink it all into the inode.
    */
   if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) {
-    xfs_defer_init(args->trans->t_dfops, args->firstblock);
+    xfs_defer_init(NULL, args->trans->t_dfops, args->firstblock);
     error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
     /* bp is gone due to xfs_da_shrink_inode */
     if (error) goto out_defer_cancel;
     xfs_defer_ijoin(args->trans->t_dfops, dp);
     error = xfs_defer_finish(&args->trans, args->trans->t_dfops);
     if (error) goto out_defer_cancel;
   }
   return 0;
 out_defer_cancel:
   xfs_defer_cancel(args->trans->t_dfops);
   return error;
 }<sep>@@
expression e1,e2,tp;
@@
-		xfs_defer_init(e1, e2);
+		xfs_defer_init(NULL, e1, e2);
<|end_of_text|> 
 | 209 
							 | 
					
	--- initial
+++ final
@@ -1,186 +1,186 @@
 STATIC int xfs_attr_node_addname(struct xfs_da_args *args) {
   struct xfs_da_state *state;
   struct xfs_da_state_blk *blk;
   struct xfs_inode *dp;
   struct xfs_mount *mp;
   int retval, error;
   trace_xfs_attr_node_addname(args);
   /*
    * Fill in bucket of arguments/results/context to carry around.
    */
   dp = args->dp;
   mp = dp->i_mount;
 restart:
   state = xfs_da_state_alloc();
   state->args = args;
   state->mp = mp;
   /*
    * Search to see if name already exists, and get back a pointer
    * to where it should go.
    */
   error = xfs_da3_node_lookup_int(state, &retval);
   if (error) goto out;
   blk = &state->path.blk[state->path.active - 1];
   ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC);
   if ((args->flags & ATTR_REPLACE) && (retval == -ENOATTR)) {
     goto out;
   } else if (retval == -EEXIST) {
     if (args->flags & ATTR_CREATE) goto out;
     trace_xfs_attr_node_replace(args);
     /* save the attribute state for later removal*/
     args->op_flags |= XFS_DA_OP_RENAME; /* atomic rename op */
     args->blkno2 = args->blkno;         /* set 2nd entry info*/
     args->index2 = args->index;
     args->rmtblkno2 = args->rmtblkno;
     args->rmtblkcnt2 = args->rmtblkcnt;
     args->rmtvaluelen2 = args->rmtvaluelen;
     /*
      * clear the remote attr state now that it is saved so that the
      * values reflect the state of the attribute we are about to
      * add, not the attribute we just found and will remove later.
      */
     args->rmtblkno = 0;
     args->rmtblkcnt = 0;
     args->rmtvaluelen = 0;
   }
   retval = xfs_attr3_leaf_add(blk->bp, state->args);
   if (retval == -ENOSPC) {
     if (state->path.active == 1) {
       /*
        * Its really a single leaf node, but it had
        * out-of-line values so it looked like it *might*
        * have been a b-tree.
        */
       xfs_da_state_free(state);
       state = NULL;
-      xfs_defer_init(args->trans->t_dfops, args->firstblock);
+      xfs_defer_init(NULL, args->trans->t_dfops, args->firstblock);
       error = xfs_attr3_leaf_to_node(args);
       if (error) goto out_defer_cancel;
       xfs_defer_ijoin(args->trans->t_dfops, dp);
       error = xfs_defer_finish(&args->trans, args->trans->t_dfops);
       if (error) goto out_defer_cancel;
       /*
        * Commit the node conversion and start the next
        * trans in the chain.
        */
       error = xfs_trans_roll_inode(&args->trans, dp);
       if (error) goto out;
       goto restart;
     }
     /*
      * Split as many Btree elements as required.
      * This code tracks the new and old attr's location
      * in the index/blkno/rmtblkno/rmtblkcnt fields and
      * in the index2/blkno2/rmtblkno2/rmtblkcnt2 fields.
      */
-    xfs_defer_init(args->trans->t_dfops, args->firstblock);
+    xfs_defer_init(NULL, args->trans->t_dfops, args->firstblock);
     error = xfs_da3_split(state);
     if (error) goto out_defer_cancel;
     xfs_defer_ijoin(args->trans->t_dfops, dp);
     error = xfs_defer_finish(&args->trans, args->trans->t_dfops);
     if (error) goto out_defer_cancel;
   } else {
     /*
      * Addition succeeded, update Btree hashvals.
      */
     xfs_da3_fixhashpath(state, &state->path);
   }
   /*
    * Kill the state structure, we're done with it and need to
    * allow the buffers to come back later.
    */
   xfs_da_state_free(state);
   state = NULL;
   /*
    * Commit the leaf addition or btree split and start the next
    * trans in the chain.
    */
   error = xfs_trans_roll_inode(&args->trans, dp);
   if (error) goto out;
   /*
    * If there was an out-of-line value, allocate the blocks we
    * identified for its storage and copy the value.  This is done
    * after we create the attribute so that we don't overflow the
    * maximum size of a transaction and/or hit a deadlock.
    */
   if (args->rmtblkno > 0) {
     error = xfs_attr_rmtval_set(args);
     if (error) return error;
   }
   /*
    * If this is an atomic rename operation, we must "flip" the
    * incomplete flags on the "new" and "old" attribute/value pairs
    * so that one disappears and one appears atomically.  Then we
    * must remove the "old" attribute/value pair.
    */
   if (args->op_flags & XFS_DA_OP_RENAME) {
     /*
      * In a separate transaction, set the incomplete flag on the
      * "old" attr and clear the incomplete flag on the "new" attr.
      */
     error = xfs_attr3_leaf_flipflags(args);
     if (error) goto out;
     /*
      * Dismantle the "old" attribute/value pair by removing
      * a "remote" value (if it exists).
      */
     args->index = args->index2;
     args->blkno = args->blkno2;
     args->rmtblkno = args->rmtblkno2;
     args->rmtblkcnt = args->rmtblkcnt2;
     args->rmtvaluelen = args->rmtvaluelen2;
     if (args->rmtblkno) {
       error = xfs_attr_rmtval_remove(args);
       if (error) return error;
     }
     /*
      * Re-find the "old" attribute entry after any split ops.
      * The INCOMPLETE flag means that we will find the "old"
      * attr, not the "new" one.
      */
     args->flags |= XFS_ATTR_INCOMPLETE;
     state = xfs_da_state_alloc();
     state->args = args;
     state->mp = mp;
     state->inleaf = 0;
     error = xfs_da3_node_lookup_int(state, &retval);
     if (error) goto out;
     /*
      * Remove the name and update the hashvals in the tree.
      */
     blk = &state->path.blk[state->path.active - 1];
     ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC);
     error = xfs_attr3_leaf_remove(blk->bp, args);
     xfs_da3_fixhashpath(state, &state->path);
     /*
      * Check to see if the tree needs to be collapsed.
      */
     if (retval && (state->path.active > 1)) {
-      xfs_defer_init(args->trans->t_dfops, args->firstblock);
+      xfs_defer_init(NULL, args->trans->t_dfops, args->firstblock);
       error = xfs_da3_join(state);
       if (error) goto out_defer_cancel;
       xfs_defer_ijoin(args->trans->t_dfops, dp);
       error = xfs_defer_finish(&args->trans, args->trans->t_dfops);
       if (error) goto out_defer_cancel;
     }
     /*
      * Commit and start the next trans in the chain.
      */
     error = xfs_trans_roll_inode(&args->trans, dp);
     if (error) goto out;
   } else if (args->rmtblkno > 0) {
     /*
      * Added a "remote" value, just clear the incomplete flag.
      */
     error = xfs_attr3_leaf_clearflag(args);
     if (error) goto out;
   }
   retval = error = 0;
 out:
   if (state) xfs_da_state_free(state);
   if (error) return error;
   return retval;
 out_defer_cancel:
   xfs_defer_cancel(args->trans->t_dfops);
   goto out;
 }<sep>@@
expression e1,e2,tp;
@@
-		xfs_defer_init(e1, e2);
+		xfs_defer_init(NULL, e1, e2);
<|end_of_text|> 
 | 210 
							 | 
					
	--- initial
+++ final
@@ -1,107 +1,107 @@
 STATIC int xfs_attr_node_removename(struct xfs_da_args *args) {
   struct xfs_da_state *state;
   struct xfs_da_state_blk *blk;
   struct xfs_inode *dp;
   struct xfs_buf *bp;
   int retval, error, forkoff;
   trace_xfs_attr_node_removename(args);
   /*
    * Tie a string around our finger to remind us where we are.
    */
   dp = args->dp;
   state = xfs_da_state_alloc();
   state->args = args;
   state->mp = dp->i_mount;
   /*
    * Search to see if name exists, and get back a pointer to it.
    */
   error = xfs_da3_node_lookup_int(state, &retval);
   if (error || (retval != -EEXIST)) {
     if (error == 0) error = retval;
     goto out;
   }
   /*
    * If there is an out-of-line value, de-allocate the blocks.
    * This is done before we remove the attribute so that we don't
    * overflow the maximum size of a transaction and/or hit a deadlock.
    */
   blk = &state->path.blk[state->path.active - 1];
   ASSERT(blk->bp != NULL);
   ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC);
   if (args->rmtblkno > 0) {
     /*
      * Fill in disk block numbers in the state structure
      * so that we can get the buffers back after we commit
      * several transactions in the following calls.
      */
     error = xfs_attr_fillstate(state);
     if (error) goto out;
     /*
      * Mark the attribute as INCOMPLETE, then bunmapi() the
      * remote value.
      */
     error = xfs_attr3_leaf_setflag(args);
     if (error) goto out;
     error = xfs_attr_rmtval_remove(args);
     if (error) goto out;
     /*
      * Refill the state structure with buffers, the prior calls
      * released our buffers.
      */
     error = xfs_attr_refillstate(state);
     if (error) goto out;
   }
   /*
    * Remove the name and update the hashvals in the tree.
    */
   blk = &state->path.blk[state->path.active - 1];
   ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC);
   retval = xfs_attr3_leaf_remove(blk->bp, args);
   xfs_da3_fixhashpath(state, &state->path);
   /*
    * Check to see if the tree needs to be collapsed.
    */
   if (retval && (state->path.active > 1)) {
-    xfs_defer_init(args->trans->t_dfops, args->firstblock);
+    xfs_defer_init(NULL, args->trans->t_dfops, args->firstblock);
     error = xfs_da3_join(state);
     if (error) goto out_defer_cancel;
     xfs_defer_ijoin(args->trans->t_dfops, dp);
     error = xfs_defer_finish(&args->trans, args->trans->t_dfops);
     if (error) goto out_defer_cancel;
     /*
      * Commit the Btree join operation and start a new trans.
      */
     error = xfs_trans_roll_inode(&args->trans, dp);
     if (error) goto out;
   }
   /*
    * If the result is small enough, push it all into the inode.
    */
   if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
     /*
      * Have to get rid of the copy of this dabuf in the state.
      */
     ASSERT(state->path.active == 1);
     ASSERT(state->path.blk[0].bp);
     state->path.blk[0].bp = NULL;
     error = xfs_attr3_leaf_read(args->trans, args->dp, 0, -1, &bp);
     if (error) goto out;
     if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) {
-      xfs_defer_init(args->trans->t_dfops, args->firstblock);
+      xfs_defer_init(NULL, args->trans->t_dfops, args->firstblock);
       error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
       /* bp is gone due to xfs_da_shrink_inode */
       if (error) goto out_defer_cancel;
       xfs_defer_ijoin(args->trans->t_dfops, dp);
       error = xfs_defer_finish(&args->trans, args->trans->t_dfops);
       if (error) goto out_defer_cancel;
     } else
       xfs_trans_brelse(args->trans, bp);
   }
   error = 0;
 out:
   xfs_da_state_free(state);
   return error;
 out_defer_cancel:
   xfs_defer_cancel(args->trans->t_dfops);
   goto out;
 }<sep>@@
expression e1,e2,tp;
@@
-		xfs_defer_init(e1, e2);
+		xfs_defer_init(NULL, e1, e2);
<|end_of_text|> 
 | 211 
							 | 
					
	--- initial
+++ final
@@ -1,62 +1,61 @@
 int xfs_attr_remove(struct xfs_inode *dp, const unsigned char *name, int flags) {
   struct xfs_mount *mp = dp->i_mount;
   struct xfs_da_args args;
   struct xfs_defer_ops dfops;
   xfs_fsblock_t firstblock;
   int error;
   XFS_STATS_INC(mp, xs_attr_remove);
   if (XFS_FORCED_SHUTDOWN(dp->i_mount)) return -EIO;
   error = xfs_attr_args_init(&args, dp, name, flags);
   if (error) return error;
   args.firstblock = &firstblock;
   /*
    * we have no control over the attribute names that userspace passes us
    * to remove, so we have to allow the name lookup prior to attribute
    * removal to fail.
    */
   args.op_flags = XFS_DA_OP_OKNOENT;
   error = xfs_qm_dqattach(dp);
   if (error) return error;
   /*
    * Root fork attributes can use reserved data blocks for this
    * operation if necessary
    */
   error = xfs_trans_alloc(mp, &M_RES(mp)->tr_attrrm, XFS_ATTRRM_SPACE_RES(mp), 0, (flags & ATTR_ROOT) ? XFS_TRANS_RESERVE : 0, &args.trans);
   if (error) return error;
-  xfs_defer_init(&dfops, &firstblock);
-  args.trans->t_dfops = &dfops;
+  xfs_defer_init(args.trans, &dfops, &firstblock);
   xfs_ilock(dp, XFS_ILOCK_EXCL);
   /*
    * No need to make quota reservations here. We expect to release some
    * blocks not allocate in the common case.
    */
   xfs_trans_ijoin(args.trans, dp, 0);
   if (!xfs_inode_hasattr(dp)) {
     error = -ENOATTR;
   } else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
     ASSERT(dp->i_afp->if_flags & XFS_IFINLINE);
     error = xfs_attr_shortform_remove(&args);
   } else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
     error = xfs_attr_leaf_removename(&args);
   } else {
     error = xfs_attr_node_removename(&args);
   }
   if (error) goto out;
   /*
    * If this is a synchronous mount, make sure that the
    * transaction goes to disk before returning to the user.
    */
   if (mp->m_flags & XFS_MOUNT_WSYNC) xfs_trans_set_sync(args.trans);
   if ((flags & ATTR_KERNOTIME) == 0) xfs_trans_ichgtime(args.trans, dp, XFS_ICHGTIME_CHG);
   /*
    * Commit the last in the sequence of transactions.
    */
   xfs_trans_log_inode(args.trans, dp, XFS_ILOG_CORE);
   error = xfs_trans_commit(args.trans);
   xfs_iunlock(dp, XFS_ILOCK_EXCL);
   return error;
 out:
   if (args.trans) xfs_trans_cancel(args.trans);
   xfs_iunlock(dp, XFS_ILOCK_EXCL);
   return error;
 }<sep>@@
expression e1,e2,tp;
@@
-		xfs_defer_init(e1, e2);
-		(tp)->t_dfops = e1;
+		xfs_defer_init(tp, e1, e2);
<|end_of_text|> 
 | 212 
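Rows 212-213 use the companion rule: wherever the old two-argument init was immediately followed by (tp)->t_dfops = e1;, the pair fuses into the three-argument call, which performs the attachment internally. Matching on that adjacent assignment is what lets the SmPL distinguish these top-level sites from the NULL-passing helper sites rewritten in rows 206-211.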
							 | 
					
	--- initial
+++ final
@@ -1,131 +1,130 @@
 int xfs_attr_set(struct xfs_inode *dp, const unsigned char *name, unsigned char *value, int valuelen, int flags) {
   struct xfs_mount *mp = dp->i_mount;
   struct xfs_buf *leaf_bp = NULL;
   struct xfs_da_args args;
   struct xfs_defer_ops dfops;
   struct xfs_trans_res tres;
   xfs_fsblock_t firstblock;
   int rsvd = (flags & ATTR_ROOT) != 0;
   int error, err2, local;
   XFS_STATS_INC(mp, xs_attr_set);
   if (XFS_FORCED_SHUTDOWN(dp->i_mount)) return -EIO;
   error = xfs_attr_args_init(&args, dp, name, flags);
   if (error) return error;
   args.value = value;
   args.valuelen = valuelen;
   args.firstblock = &firstblock;
   args.op_flags = XFS_DA_OP_ADDNAME | XFS_DA_OP_OKNOENT;
   args.total = xfs_attr_calc_size(&args, &local);
   error = xfs_qm_dqattach(dp);
   if (error) return error;
   /*
    * If the inode doesn't have an attribute fork, add one.
    * (inode must not be locked when we call this routine)
    */
   if (XFS_IFORK_Q(dp) == 0) {
     int sf_size = sizeof(xfs_attr_sf_hdr_t) + XFS_ATTR_SF_ENTSIZE_BYNAME(args.namelen, valuelen);
     error = xfs_bmap_add_attrfork(dp, sf_size, rsvd);
     if (error) return error;
   }
   tres.tr_logres = M_RES(mp)->tr_attrsetm.tr_logres + M_RES(mp)->tr_attrsetrt.tr_logres * args.total;
   tres.tr_logcount = XFS_ATTRSET_LOG_COUNT;
   tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
   /*
    * Root fork attributes can use reserved data blocks for this
    * operation if necessary
    */
   error = xfs_trans_alloc(mp, &tres, args.total, 0, rsvd ? XFS_TRANS_RESERVE : 0, &args.trans);
   if (error) return error;
-  xfs_defer_init(&dfops, &firstblock);
-  args.trans->t_dfops = &dfops;
+  xfs_defer_init(args.trans, &dfops, &firstblock);
   xfs_ilock(dp, XFS_ILOCK_EXCL);
   error = xfs_trans_reserve_quota_nblks(args.trans, dp, args.total, 0, rsvd ? XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES : XFS_QMOPT_RES_REGBLKS);
   if (error) {
     xfs_iunlock(dp, XFS_ILOCK_EXCL);
     xfs_trans_cancel(args.trans);
     return error;
   }
   xfs_trans_ijoin(args.trans, dp, 0);
   /*
    * If the attribute list is non-existent or a shortform list,
    * upgrade it to a single-leaf-block attribute list.
    */
   if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL || (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS && dp->i_d.di_anextents == 0)) {
     /*
      * Build initial attribute list (if required).
      */
     if (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS) xfs_attr_shortform_create(&args);
     /*
      * Try to add the attr to the attribute list in
      * the inode.
      */
     error = xfs_attr_shortform_addname(&args);
     if (error != -ENOSPC) {
       /*
        * Commit the shortform mods, and we're done.
        * NOTE: this is also the error path (EEXIST, etc).
        */
       ASSERT(args.trans != NULL);
       /*
        * If this is a synchronous mount, make sure that
        * the transaction goes to disk before returning
        * to the user.
        */
       if (mp->m_flags & XFS_MOUNT_WSYNC) xfs_trans_set_sync(args.trans);
       if (!error && (flags & ATTR_KERNOTIME) == 0) { xfs_trans_ichgtime(args.trans, dp, XFS_ICHGTIME_CHG); }
       err2 = xfs_trans_commit(args.trans);
       xfs_iunlock(dp, XFS_ILOCK_EXCL);
       return error ? error : err2;
     }
     /*
      * It won't fit in the shortform, transform to a leaf block.
      * GROT: another possible req'mt for a double-split btree op.
      */
     error = xfs_attr_shortform_to_leaf(&args, &leaf_bp);
     if (error) goto out_defer_cancel;
     /*
      * Prevent the leaf buffer from being unlocked so that a
      * concurrent AIL push cannot grab the half-baked leaf
      * buffer and run into problems with the write verifier.
      */
     xfs_trans_bhold(args.trans, leaf_bp);
     xfs_defer_bjoin(&dfops, leaf_bp);
     xfs_defer_ijoin(&dfops, dp);
     error = xfs_defer_finish(&args.trans, &dfops);
     if (error) goto out_defer_cancel;
     /*
      * Commit the leaf transformation.  We'll need another (linked)
      * transaction to add the new attribute to the leaf, which
      * means that we have to hold & join the leaf buffer here too.
      */
     error = xfs_trans_roll_inode(&args.trans, dp);
     if (error) goto out;
     xfs_trans_bjoin(args.trans, leaf_bp);
     leaf_bp = NULL;
   }
   if (xfs_bmap_one_block(dp, XFS_ATTR_FORK))
     error = xfs_attr_leaf_addname(&args);
   else
     error = xfs_attr_node_addname(&args);
   if (error) goto out;
   /*
    * If this is a synchronous mount, make sure that the
    * transaction goes to disk before returning to the user.
    */
   if (mp->m_flags & XFS_MOUNT_WSYNC) xfs_trans_set_sync(args.trans);
   if ((flags & ATTR_KERNOTIME) == 0) xfs_trans_ichgtime(args.trans, dp, XFS_ICHGTIME_CHG);
   /*
    * Commit the last in the sequence of transactions.
    */
   xfs_trans_log_inode(args.trans, dp, XFS_ILOG_CORE);
   error = xfs_trans_commit(args.trans);
   xfs_iunlock(dp, XFS_ILOCK_EXCL);
   return error;
 out_defer_cancel:
   xfs_defer_cancel(&dfops);
 out:
   if (leaf_bp) xfs_trans_brelse(args.trans, leaf_bp);
   if (args.trans) xfs_trans_cancel(args.trans);
   xfs_iunlock(dp, XFS_ILOCK_EXCL);
   return error;
 }<sep>@@
expression e1,e2,tp;
@@
-		xfs_defer_init(e1, e2);
-		(tp)->t_dfops = e1;
+		xfs_defer_init(tp, e1, e2);
<|end_of_text|> 
 | 213 
	--- initial
+++ final
@@ -1,114 +1,113 @@
 int xfs_alloc_file_space(struct xfs_inode *ip, xfs_off_t offset, xfs_off_t len, int alloc_type) {
   xfs_mount_t *mp = ip->i_mount;
   xfs_off_t count;
   xfs_filblks_t allocated_fsb;
   xfs_filblks_t allocatesize_fsb;
   xfs_extlen_t extsz, temp;
   xfs_fileoff_t startoffset_fsb;
   xfs_fsblock_t firstfsb;
   int nimaps;
   int quota_flag;
   int rt;
   xfs_trans_t *tp;
   xfs_bmbt_irec_t imaps[1], *imapp;
   struct xfs_defer_ops dfops;
   uint qblocks, resblks, resrtextents;
   int error;
   trace_xfs_alloc_file_space(ip);
   if (XFS_FORCED_SHUTDOWN(mp)) return -EIO;
   error = xfs_qm_dqattach(ip);
   if (error) return error;
   if (len <= 0) return -EINVAL;
   rt = XFS_IS_REALTIME_INODE(ip);
   extsz = xfs_get_extsz_hint(ip);
   count = len;
   imapp = &imaps[0];
   nimaps = 1;
   startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
   allocatesize_fsb = XFS_B_TO_FSB(mp, count);
   /*
    * Allocate file space until done or until there is an error
    */
   while (allocatesize_fsb && !error) {
     xfs_fileoff_t s, e;
     /*
      * Determine space reservations for data/realtime.
      */
     if (unlikely(extsz)) {
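       /* Round the request outward to extent-size-hint (extsz) aligned boundaries. */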
       s = startoffset_fsb;
       do_div(s, extsz);
       s *= extsz;
       e = startoffset_fsb + allocatesize_fsb;
       div_u64_rem(startoffset_fsb, extsz, &temp);
       if (temp) e += temp;
       div_u64_rem(e, extsz, &temp);
       if (temp) e += extsz - temp;
     } else {
       s = 0;
       e = allocatesize_fsb;
     }
     /*
      * The transaction reservation is limited to a 32-bit block
      * count, hence we need to limit the number of blocks we are
      * trying to reserve to avoid an overflow. We can't allocate
      * more than @nimaps extents, and an extent is limited on disk
      * to MAXEXTLEN (21 bits), so use that to enforce the limit.
      */
     resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
     if (unlikely(rt)) {
       resrtextents = qblocks = resblks;
       resrtextents /= mp->m_sb.sb_rextsize;
       resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
       quota_flag = XFS_QMOPT_RES_RTBLKS;
     } else {
       resrtextents = 0;
       resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
       quota_flag = XFS_QMOPT_RES_REGBLKS;
     }
     /*
      * Allocate and setup the transaction.
      */
     error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, resrtextents, 0, &tp);
     /*
      * Check for running out of space
      */
     if (error) {
       /*
        * Free the transaction structure.
        */
       ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
       break;
     }
     xfs_ilock(ip, XFS_ILOCK_EXCL);
     error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
     if (error) goto error1;
     xfs_trans_ijoin(tp, ip, 0);
-    xfs_defer_init(&dfops, &firstfsb);
-    tp->t_dfops = &dfops;
+    xfs_defer_init(tp, &dfops, &firstfsb);
     error = xfs_bmapi_write(tp, ip, startoffset_fsb, allocatesize_fsb, alloc_type, &firstfsb, resblks, imapp, &nimaps);
     if (error) goto error0;
     /*
      * Complete the transaction
      */
     error = xfs_defer_finish(&tp, tp->t_dfops);
     if (error) goto error0;
     error = xfs_trans_commit(tp);
     xfs_iunlock(ip, XFS_ILOCK_EXCL);
     if (error) break;
     allocated_fsb = imapp->br_blockcount;
     if (nimaps == 0) {
       error = -ENOSPC;
       break;
     }
     startoffset_fsb += allocated_fsb;
     allocatesize_fsb -= allocated_fsb;
   }
   return error;
 error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
   xfs_defer_cancel(&dfops);
   xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
 error1: /* Just cancel transaction */
   xfs_trans_cancel(tp);
   xfs_iunlock(ip, XFS_ILOCK_EXCL);
   return error;
 }<sep>@@
expression e1,e2,tp;
@@
-		xfs_defer_init(e1, e2);
-		(tp)->t_dfops = e1;
+		xfs_defer_init(tp, e1, e2);
<|end_of_text|> 
 | 214 
	--- initial
+++ final
@@ -1,39 +1,38 @@
 int xfs_collapse_file_space(struct xfs_inode *ip, xfs_off_t offset, xfs_off_t len) {
   struct xfs_mount *mp = ip->i_mount;
   struct xfs_trans *tp;
   int error;
   struct xfs_defer_ops dfops;
   xfs_fsblock_t first_block;
   xfs_fileoff_t next_fsb = XFS_B_TO_FSB(mp, offset + len);
   xfs_fileoff_t shift_fsb = XFS_B_TO_FSB(mp, len);
   uint resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
   bool done = false;
   ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
   ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
   trace_xfs_collapse_file_space(ip);
   error = xfs_free_file_space(ip, offset, len);
   if (error) return error;
   error = xfs_prepare_shift(ip, offset);
   if (error) return error;
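   /* One transaction per pass; xfs_bmap_collapse_extents sets "done" once the whole range has shifted. */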
   while (!error && !done) {
     error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
     if (error) break;
     xfs_ilock(ip, XFS_ILOCK_EXCL);
     error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot, ip->i_gdquot, ip->i_pdquot, resblks, 0, XFS_QMOPT_RES_REGBLKS);
     if (error) goto out_trans_cancel;
     xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-    xfs_defer_init(&dfops, &first_block);
-    tp->t_dfops = &dfops;
+    xfs_defer_init(tp, &dfops, &first_block);
     error = xfs_bmap_collapse_extents(tp, ip, &next_fsb, shift_fsb, &done, &first_block);
     if (error) goto out_bmap_cancel;
     error = xfs_defer_finish(&tp, tp->t_dfops);
     if (error) goto out_bmap_cancel;
     error = xfs_trans_commit(tp);
   }
   return error;
 out_bmap_cancel:
   xfs_defer_cancel(tp->t_dfops);
 out_trans_cancel:
   xfs_trans_cancel(tp);
   return error;
 }<sep>@@
expression e1,e2,tp;
@@
-		xfs_defer_init(e1, e2);
-		(tp)->t_dfops = e1;
+		xfs_defer_init(tp, e1, e2);
<|end_of_text|> 
 | 215 
	--- initial
+++ final
@@ -1,43 +1,42 @@
 int xfs_insert_file_space(struct xfs_inode *ip, loff_t offset, loff_t len) {
   struct xfs_mount *mp = ip->i_mount;
   struct xfs_trans *tp;
   int error;
   struct xfs_defer_ops dfops;
   xfs_fsblock_t first_block;
   xfs_fileoff_t stop_fsb = XFS_B_TO_FSB(mp, offset);
   xfs_fileoff_t next_fsb = NULLFSBLOCK;
   xfs_fileoff_t shift_fsb = XFS_B_TO_FSB(mp, len);
   bool done = false;
   ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
   ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
   trace_xfs_insert_file_space(ip);
   error = xfs_bmap_can_insert_extents(ip, stop_fsb, shift_fsb);
   if (error) return error;
   error = xfs_prepare_shift(ip, offset);
   if (error) return error;
   /*
    * The extent shifting code works on extent granularity. So, if stop_fsb
   * is not the starting block of an extent, we need to split the extent at
    * stop_fsb.
    */
   error = xfs_bmap_split_extent(ip, stop_fsb);
   if (error) return error;
   while (!error && !done) {
     error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
     if (error) break;
     xfs_ilock(ip, XFS_ILOCK_EXCL);
     xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-    xfs_defer_init(&dfops, &first_block);
-    tp->t_dfops = &dfops;
+    xfs_defer_init(tp, &dfops, &first_block);
     error = xfs_bmap_insert_extents(tp, ip, &next_fsb, shift_fsb, &done, stop_fsb, &first_block);
     if (error) goto out_bmap_cancel;
     error = xfs_defer_finish(&tp, tp->t_dfops);
     if (error) goto out_bmap_cancel;
     error = xfs_trans_commit(tp);
   }
   return error;
 out_bmap_cancel:
   xfs_defer_cancel(tp->t_dfops);
   xfs_trans_cancel(tp);
   return error;
 }<sep>@@
expression e1,e2,tp;
@@
-		xfs_defer_init(e1, e2);
-		(tp)->t_dfops = e1;
+		xfs_defer_init(tp, e1, e2);
<|end_of_text|> 
 | 216 
	--- initial
+++ final
@@ -1,83 +1,83 @@
 STATIC int xfs_swap_extent_rmap(struct xfs_trans **tpp, struct xfs_inode *ip, struct xfs_inode *tip) {
   struct xfs_trans *tp = *tpp;
   struct xfs_mount *mp = tp->t_mountp;
   struct xfs_bmbt_irec irec;
   struct xfs_bmbt_irec uirec;
   struct xfs_bmbt_irec tirec;
   xfs_fileoff_t offset_fsb;
   xfs_fileoff_t end_fsb;
   xfs_filblks_t count_fsb;
   xfs_fsblock_t firstfsb;
   int error;
   xfs_filblks_t ilen;
   xfs_filblks_t rlen;
   int nimaps;
   uint64_t tip_flags2;
   /*
    * If the source file has shared blocks, we must flag the donor
    * file as having shared blocks so that we get the shared-block
    * rmap functions when we go to fix up the rmaps.  The flags
   * will be switched for real later.
    */
   tip_flags2 = tip->i_d.di_flags2;
   if (ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK) tip->i_d.di_flags2 |= XFS_DIFLAG2_REFLINK;
   offset_fsb = 0;
   end_fsb = XFS_B_TO_FSB(ip->i_mount, i_size_read(VFS_I(ip)));
   count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
   while (count_fsb) {
     /* Read extent from the donor file */
     nimaps = 1;
     error = xfs_bmapi_read(tip, offset_fsb, count_fsb, &tirec, &nimaps, 0);
     if (error) goto out;
     ASSERT(nimaps == 1);
     ASSERT(tirec.br_startblock != DELAYSTARTBLOCK);
     trace_xfs_swap_extent_rmap_remap(tip, &tirec);
     ilen = tirec.br_blockcount;
     /* Unmap the old blocks in the source file. */
     while (tirec.br_blockcount) {
-      xfs_defer_init(tp->t_dfops, &firstfsb);
+      xfs_defer_init(NULL, tp->t_dfops, &firstfsb);
       trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);
       /* Read extent from the source file */
       nimaps = 1;
       error = xfs_bmapi_read(ip, tirec.br_startoff, tirec.br_blockcount, &irec, &nimaps, 0);
       if (error) goto out_defer;
       ASSERT(nimaps == 1);
       ASSERT(tirec.br_startoff == irec.br_startoff);
       trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);
       /* Trim the extent. */
       uirec = tirec;
       uirec.br_blockcount = rlen = min_t(xfs_filblks_t, tirec.br_blockcount, irec.br_blockcount);
       trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);
       /* Remove the mapping from the donor file. */
       error = xfs_bmap_unmap_extent(mp, tp->t_dfops, tip, &uirec);
       if (error) goto out_defer;
       /* Remove the mapping from the source file. */
       error = xfs_bmap_unmap_extent(mp, tp->t_dfops, ip, &irec);
       if (error) goto out_defer;
       /* Map the donor file's blocks into the source file. */
       error = xfs_bmap_map_extent(mp, tp->t_dfops, ip, &uirec);
       if (error) goto out_defer;
       /* Map the source file's blocks into the donor file. */
       error = xfs_bmap_map_extent(mp, tp->t_dfops, tip, &irec);
       if (error) goto out_defer;
       xfs_defer_ijoin(tp->t_dfops, ip);
       error = xfs_defer_finish(tpp, tp->t_dfops);
       tp = *tpp;
       if (error) goto out_defer;
       tirec.br_startoff += rlen;
       if (tirec.br_startblock != HOLESTARTBLOCK && tirec.br_startblock != DELAYSTARTBLOCK) tirec.br_startblock += rlen;
       tirec.br_blockcount -= rlen;
     }
     /* Roll on... */
     count_fsb -= ilen;
     offset_fsb += ilen;
   }
   tip->i_d.di_flags2 = tip_flags2;
   return 0;
 out_defer:
   xfs_defer_cancel(tp->t_dfops);
 out:
   trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
   tip->i_d.di_flags2 = tip_flags2;
   return error;
 }<sep>@@
expression e1,e2,tp;
@@
-		xfs_defer_init(e1, e2);
+		xfs_defer_init(NULL, e1, e2);
<|end_of_text|> 
 | 217 
	--- initial
+++ final
@@ -1,33 +1,32 @@
 static int xfs_unmap_extent(struct xfs_inode *ip, xfs_fileoff_t startoffset_fsb, xfs_filblks_t len_fsb, int *done) {
   struct xfs_mount *mp = ip->i_mount;
   struct xfs_trans *tp;
   struct xfs_defer_ops dfops;
   xfs_fsblock_t firstfsb;
   uint resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
   int error;
   error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
   if (error) {
     ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
     return error;
   }
   xfs_ilock(ip, XFS_ILOCK_EXCL);
   error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot, ip->i_gdquot, ip->i_pdquot, resblks, 0, XFS_QMOPT_RES_REGBLKS);
   if (error) goto out_trans_cancel;
   xfs_trans_ijoin(tp, ip, 0);
-  xfs_defer_init(&dfops, &firstfsb);
-  tp->t_dfops = &dfops;
+  xfs_defer_init(tp, &dfops, &firstfsb);
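  /* nexts == 2: unmap at most two extents within this single transaction. */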
   error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, &firstfsb, done);
   if (error) goto out_bmap_cancel;
   xfs_defer_ijoin(tp->t_dfops, ip);
   error = xfs_defer_finish(&tp, tp->t_dfops);
   if (error) goto out_bmap_cancel;
   error = xfs_trans_commit(tp);
 out_unlock:
   xfs_iunlock(ip, XFS_ILOCK_EXCL);
   return error;
 out_bmap_cancel:
   xfs_defer_cancel(tp->t_dfops);
 out_trans_cancel:
   xfs_trans_cancel(tp);
   goto out_unlock;
 }<sep>@@
expression e1,e2,tp;
@@
-		xfs_defer_init(e1, e2);
-		(tp)->t_dfops = e1;
+		xfs_defer_init(tp, e1, e2);
<|end_of_text|> 
 | 219 
	--- initial
+++ final
@@ -1,92 +1,91 @@
 int                                    /* error code */
 xfs_bmap_add_attrfork(xfs_inode_t *ip, /* incore inode pointer */
                       int size,        /* space new attribute needs */
                       int rsvd)        /* xact may use reserved blks */
 {
   xfs_fsblock_t firstblock;   /* 1st block/ag allocated */
   struct xfs_defer_ops dfops; /* freed extent records */
   xfs_mount_t *mp;            /* mount structure */
   xfs_trans_t *tp;            /* transaction pointer */
   int blks;                   /* space reservation */
   int version = 1;            /* superblock attr version */
   int logflags;               /* logging flags */
   int error;                  /* error return value */
   ASSERT(XFS_IFORK_Q(ip) == 0);
   mp = ip->i_mount;
   ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
   blks = XFS_ADDAFORK_SPACE_RES(mp);
   error = xfs_trans_alloc(mp, &M_RES(mp)->tr_addafork, blks, 0, rsvd ? XFS_TRANS_RESERVE : 0, &tp);
   if (error) return error;
-  xfs_defer_init(&dfops, &firstblock);
-  tp->t_dfops = &dfops;
+  xfs_defer_init(tp, &dfops, &firstblock);
   xfs_ilock(ip, XFS_ILOCK_EXCL);
   error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ? XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES : XFS_QMOPT_RES_REGBLKS);
   if (error) goto trans_cancel;
   if (XFS_IFORK_Q(ip)) goto trans_cancel;
   if (ip->i_d.di_anextents != 0) {
     error = -EFSCORRUPTED;
     goto trans_cancel;
   }
   if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
     /*
      * For inodes coming from pre-6.2 filesystems.
      */
     ASSERT(ip->i_d.di_aformat == 0);
     ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
   }
   xfs_trans_ijoin(tp, ip, 0);
   xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
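  /* di_forkoff is stored in 8-byte units, hence the >> 3 conversions below. */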
   switch (ip->i_d.di_format) {
   case XFS_DINODE_FMT_DEV: ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3; break;
   case XFS_DINODE_FMT_LOCAL:
   case XFS_DINODE_FMT_EXTENTS:
   case XFS_DINODE_FMT_BTREE:
     ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
     if (!ip->i_d.di_forkoff)
       ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
     else if (mp->m_flags & XFS_MOUNT_ATTR2)
       version = 2;
     break;
   default:
     ASSERT(0);
     error = -EINVAL;
     goto trans_cancel;
   }
   ASSERT(ip->i_afp == NULL);
   ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
   ip->i_afp->if_flags = XFS_IFEXTENTS;
   logflags = 0;
   switch (ip->i_d.di_format) {
   case XFS_DINODE_FMT_LOCAL: error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock, &logflags); break;
   case XFS_DINODE_FMT_EXTENTS: error = xfs_bmap_add_attrfork_extents(tp, ip, &firstblock, &logflags); break;
   case XFS_DINODE_FMT_BTREE: error = xfs_bmap_add_attrfork_btree(tp, ip, &firstblock, &logflags); break;
   default: error = 0; break;
   }
   if (logflags) xfs_trans_log_inode(tp, ip, logflags);
   if (error) goto bmap_cancel;
   if (!xfs_sb_version_hasattr(&mp->m_sb) || (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
     bool log_sb = false;
     spin_lock(&mp->m_sb_lock);
     if (!xfs_sb_version_hasattr(&mp->m_sb)) {
       xfs_sb_version_addattr(&mp->m_sb);
       log_sb = true;
     }
     if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) {
       xfs_sb_version_addattr2(&mp->m_sb);
       log_sb = true;
     }
     spin_unlock(&mp->m_sb_lock);
     if (log_sb) xfs_log_sb(tp);
   }
   error = xfs_defer_finish(&tp, &dfops);
   if (error) goto bmap_cancel;
   error = xfs_trans_commit(tp);
   xfs_iunlock(ip, XFS_ILOCK_EXCL);
   return error;
 bmap_cancel:
   xfs_defer_cancel(&dfops);
 trans_cancel:
   xfs_trans_cancel(tp);
   xfs_iunlock(ip, XFS_ILOCK_EXCL);
   return error;
 }<sep>@@
expression e1,e2,tp;
@@
-		xfs_defer_init(e1, e2);
-		(tp)->t_dfops = e1;
+		xfs_defer_init(tp, e1, e2);
<|end_of_text|> 
 | 220 
	--- initial
+++ final
@@ -1,22 +1,21 @@
 int xfs_bmap_split_extent(struct xfs_inode *ip, xfs_fileoff_t split_fsb) {
   struct xfs_mount *mp = ip->i_mount;
   struct xfs_trans *tp;
   struct xfs_defer_ops dfops;
   xfs_fsblock_t firstfsb;
   int error;
   error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
   if (error) return error;
-  xfs_defer_init(&dfops, &firstfsb);
-  tp->t_dfops = &dfops;
+  xfs_defer_init(tp, &dfops, &firstfsb);
   xfs_ilock(ip, XFS_ILOCK_EXCL);
   xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
   error = xfs_bmap_split_extent_at(tp, ip, split_fsb, &firstfsb);
   if (error) goto out;
   error = xfs_defer_finish(&tp, &dfops);
   if (error) goto out;
   return xfs_trans_commit(tp);
 out:
   xfs_defer_cancel(&dfops);
   xfs_trans_cancel(tp);
   return error;
 }<sep>@@
expression e1,e2,tp;
@@
-		xfs_defer_init(e1, e2);
-		(tp)->t_dfops = e1;
+		xfs_defer_init(tp, e1, e2);
<|end_of_text|> 
 | 221 
	--- initial
+++ final
@@ -1,86 +1,86 @@
 STATIC int xfs_dquot_disk_alloc(struct xfs_trans **tpp, struct xfs_dquot *dqp, struct xfs_buf **bpp) {
   struct xfs_bmbt_irec map;
   struct xfs_trans *tp = *tpp;
   struct xfs_mount *mp = tp->t_mountp;
   struct xfs_buf *bp;
   struct xfs_inode *quotip = xfs_quota_inode(mp, dqp->dq_flags);
   xfs_fsblock_t firstblock;
   int nmaps = 1;
   int error;
   trace_xfs_dqalloc(dqp);
-  xfs_defer_init(tp->t_dfops, &firstblock);
+  xfs_defer_init(NULL, tp->t_dfops, &firstblock);
   xfs_ilock(quotip, XFS_ILOCK_EXCL);
   if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
     /*
     * Return if this type of quota was turned off while we didn't
     * hold the inode lock
      */
     xfs_iunlock(quotip, XFS_ILOCK_EXCL);
     return -ESRCH;
   }
   /* Create the block mapping. */
   xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
   error = xfs_bmapi_write(tp, quotip, dqp->q_fileoffset, XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA, &firstblock, XFS_QM_DQALLOC_SPACE_RES(mp), &map, &nmaps);
   if (error) goto error0;
   ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
   ASSERT(nmaps == 1);
   ASSERT((map.br_startblock != DELAYSTARTBLOCK) && (map.br_startblock != HOLESTARTBLOCK));
   /*
    * Keep track of the blkno to save a lookup later
    */
   dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
   /* now we can just get the buffer (there's nothing to read yet) */
   bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, dqp->q_blkno, mp->m_quotainfo->qi_dqchunklen, 0);
   if (!bp) {
     error = -ENOMEM;
     goto error1;
   }
   bp->b_ops = &xfs_dquot_buf_ops;
   /*
    * Make a chunk of dquots out of this buffer and log
    * the entire thing.
    */
   xfs_qm_init_dquot_blk(tp, mp, be32_to_cpu(dqp->q_core.d_id), dqp->dq_flags & XFS_DQ_ALLTYPES, bp);
   xfs_buf_set_ref(bp, XFS_DQUOT_REF);
   /*
    * Hold the buffer and join it to the dfops so that we'll still own
    * the buffer when we return to the caller.  The buffer disposal on
    * error must be paid attention to very carefully, as it has been
    * broken since commit efa092f3d4c6 "[XFS] Fixes a bug in the quota
    * code when allocating a new dquot record" in 2005, and the later
    * conversion to xfs_defer_ops in commit 310a75a3c6c747 failed to keep
    * the buffer locked across the _defer_finish call.  We can now do
    * this correctly with xfs_defer_bjoin.
    *
    * Above, we allocated a disk block for the dquot information and
    * used get_buf to initialize the dquot.  If the _defer_bjoin fails,
    * the buffer is still locked to *tpp, so we must _bhold_release and
    * then _trans_brelse the buffer.  If the _defer_finish fails, the old
    * transaction is gone but the new buffer is not joined or held to any
    * transaction, so we must _buf_relse it.
    *
    * If everything succeeds, the caller of this function is returned a
    * buffer that is locked and held to the transaction.  The caller
    * is responsible for unlocking any buffer passed back, either
    * manually or by committing the transaction.
    */
   xfs_trans_bhold(tp, bp);
   error = xfs_defer_bjoin(tp->t_dfops, bp);
   if (error) {
     xfs_trans_bhold_release(tp, bp);
     xfs_trans_brelse(tp, bp);
     goto error1;
   }
   error = xfs_defer_finish(tpp, tp->t_dfops);
   tp = *tpp;
   if (error) {
     xfs_buf_relse(bp);
     goto error1;
   }
   *bpp = bp;
   return 0;
 error1:
   xfs_defer_cancel(tp->t_dfops);
 error0:
   return error;
 }<sep>@@
expression e1,e2,tp;
@@
-		xfs_defer_init(e1, e2);
+		xfs_defer_init(NULL, e1, e2);
<|end_of_text|> 
 | 222 
	--- initial
+++ final
@@ -1,28 +1,27 @@
 static int xfs_qm_dqread_alloc(struct xfs_mount *mp, struct xfs_dquot *dqp, struct xfs_buf **bpp) {
   struct xfs_trans *tp;
   struct xfs_defer_ops dfops;
   struct xfs_buf *bp;
   xfs_fsblock_t firstblock;
   int error;
   error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_dqalloc, XFS_QM_DQALLOC_SPACE_RES(mp), 0, 0, &tp);
   if (error) goto err;
-  xfs_defer_init(&dfops, &firstblock);
-  tp->t_dfops = &dfops;
+  xfs_defer_init(tp, &dfops, &firstblock);
   error = xfs_dquot_disk_alloc(&tp, dqp, &bp);
   if (error) goto err_cancel;
   error = xfs_trans_commit(tp);
   if (error) {
     /*
      * Buffer was held to the transaction, so we have to unlock it
      * manually here because we're not passing it back.
      */
     xfs_buf_relse(bp);
     goto err;
   }
   *bpp = bp;
   return 0;
 err_cancel:
   xfs_trans_cancel(tp);
 err:
   return error;
 }<sep>@@
expression e1,e2,tp;
@@
-		xfs_defer_init(e1, e2);
-		(tp)->t_dfops = e1;
+		xfs_defer_init(tp, e1, e2);
<|end_of_text|> 
 | 223 
	--- initial
+++ final
@@ -1,122 +1,121 @@
 int xfs_create(xfs_inode_t *dp, struct xfs_name *name, umode_t mode, dev_t rdev, xfs_inode_t **ipp) {
   int is_dir = S_ISDIR(mode);
   struct xfs_mount *mp = dp->i_mount;
   struct xfs_inode *ip = NULL;
   struct xfs_trans *tp = NULL;
   int error;
   struct xfs_defer_ops dfops;
   xfs_fsblock_t first_block;
   bool unlock_dp_on_error = false;
   prid_t prid;
   struct xfs_dquot *udqp = NULL;
   struct xfs_dquot *gdqp = NULL;
   struct xfs_dquot *pdqp = NULL;
   struct xfs_trans_res *tres;
   uint resblks;
   trace_xfs_create(dp, name);
   if (XFS_FORCED_SHUTDOWN(mp)) return -EIO;
   prid = xfs_get_initial_prid(dp);
   /*
    * Make sure that we have allocated dquot(s) on disk.
    */
   error = xfs_qm_vop_dqalloc(dp, xfs_kuid_to_uid(current_fsuid()), xfs_kgid_to_gid(current_fsgid()), prid, XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp, &pdqp);
   if (error) return error;
   if (is_dir) {
     resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
     tres = &M_RES(mp)->tr_mkdir;
   } else {
     resblks = XFS_CREATE_SPACE_RES(mp, name->len);
     tres = &M_RES(mp)->tr_create;
   }
   /*
    * Initially assume that the file does not exist and
    * reserve the resources for that case.  If that is not
    * the case we'll drop the one we have and get a more
    * appropriate transaction later.
    */
   error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
   if (error == -ENOSPC) {
     /* flush outstanding delalloc blocks and retry */
     xfs_flush_inodes(mp);
     error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
   }
   if (error) goto out_release_inode;
   xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
   unlock_dp_on_error = true;
-  xfs_defer_init(&dfops, &first_block);
-  tp->t_dfops = &dfops;
+  xfs_defer_init(tp, &dfops, &first_block);
   /*
    * Reserve disk quota and the inode.
    */
   error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp, pdqp, resblks, 1, 0);
   if (error) goto out_trans_cancel;
   /*
    * A newly created regular or special file just has one directory
   * entry pointing to it, but a directory also has the "." entry
    * pointing to itself.
    */
   error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev, prid, &ip);
   if (error) goto out_trans_cancel;
   /*
    * Now we join the directory inode to the transaction.  We do not do it
    * earlier because xfs_dir_ialloc might commit the previous transaction
    * (and release all the locks).  An error from here on will result in
    * the transaction cancel unlocking dp so don't do it explicitly in the
    * error path.
    */
   xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
   unlock_dp_on_error = false;
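  /* Pass the directory code what remains after the inode allocation reservation. */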
   error = xfs_dir_createname(tp, dp, name, ip->i_ino, &first_block, resblks ? resblks - XFS_IALLOC_SPACE_RES(mp) : 0);
   if (error) {
     ASSERT(error != -ENOSPC);
     goto out_trans_cancel;
   }
   xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
   xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
   if (is_dir) {
     error = xfs_dir_init(tp, ip, dp);
     if (error) goto out_bmap_cancel;
     error = xfs_bumplink(tp, dp);
     if (error) goto out_bmap_cancel;
   }
   /*
    * If this is a synchronous mount, make sure that the
    * create transaction goes to disk before returning to
    * the user.
    */
   if (mp->m_flags & (XFS_MOUNT_WSYNC | XFS_MOUNT_DIRSYNC)) xfs_trans_set_sync(tp);
   /*
    * Attach the dquot(s) to the inodes and modify them incore.
    * These ids of the inode couldn't have changed since the new
    * inode has been locked ever since it was created.
    */
   xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
   error = xfs_defer_finish(&tp, &dfops);
   if (error) goto out_bmap_cancel;
   error = xfs_trans_commit(tp);
   if (error) goto out_release_inode;
   xfs_qm_dqrele(udqp);
   xfs_qm_dqrele(gdqp);
   xfs_qm_dqrele(pdqp);
   *ipp = ip;
   return 0;
 out_bmap_cancel:
   xfs_defer_cancel(&dfops);
 out_trans_cancel:
   xfs_trans_cancel(tp);
 out_release_inode:
   /*
    * Wait until after the current transaction is aborted to finish the
    * setup of the inode and release the inode.  This prevents recursive
    * transactions and deadlocks from xfs_inactive.
    */
   if (ip) {
     xfs_finish_inode_setup(ip);
     IRELE(ip);
   }
   xfs_qm_dqrele(udqp);
   xfs_qm_dqrele(gdqp);
   xfs_qm_dqrele(pdqp);
   if (unlock_dp_on_error) xfs_iunlock(dp, XFS_ILOCK_EXCL);
   return error;
 }<sep>@@
expression e1,e2,tp;
@@
-		xfs_defer_init(e1, e2);
-		(tp)->t_dfops = e1;
+		xfs_defer_init(tp, e1, e2);
<|end_of_text|> 
 | 224 
	--- initial
+++ final
@@ -1,68 +1,67 @@
 STATIC int xfs_inactive_ifree(struct xfs_inode *ip) {
   struct xfs_defer_ops dfops;
   xfs_fsblock_t first_block;
   struct xfs_mount *mp = ip->i_mount;
   struct xfs_trans *tp;
   int error;
   /*
    * We try to use a per-AG reservation for any block needed by the finobt
    * tree, but as the finobt feature predates the per-AG reservation
    * support a degraded file system might not have enough space for the
    * reservation at mount time.  In that case try to dip into the reserved
    * pool and pray.
    *
    * Send a warning if the reservation does happen to fail, as the inode
    * now remains allocated and sits on the unlinked list until the fs is
    * repaired.
    */
   if (unlikely(mp->m_inotbt_nores)) {
     error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE, &tp);
   } else {
     error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
   }
   if (error) {
     if (error == -ENOSPC) {
       xfs_warn_ratelimited(mp, "Failed to remove inode(s) from unlinked list. "
                                "Please free space, unmount and run xfs_repair.");
     } else {
       ASSERT(XFS_FORCED_SHUTDOWN(mp));
     }
     return error;
   }
   xfs_ilock(ip, XFS_ILOCK_EXCL);
   xfs_trans_ijoin(tp, ip, 0);
-  xfs_defer_init(&dfops, &first_block);
-  tp->t_dfops = &dfops;
+  xfs_defer_init(tp, &dfops, &first_block);
   error = xfs_ifree(tp, ip);
   if (error) {
     /*
      * If we fail to free the inode, shut down.  The cancel
     * might do that, but we need to make sure.  Otherwise the
      * inode might be lost for a long time or forever.
      */
     if (!XFS_FORCED_SHUTDOWN(mp)) {
       xfs_notice(mp, "%s: xfs_ifree returned error %d", __func__, error);
       xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
     }
     xfs_trans_cancel(tp);
     xfs_iunlock(ip, XFS_ILOCK_EXCL);
     return error;
   }
   /*
    * Credit the quota account(s). The inode is gone.
    */
   xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
   /*
    * Just ignore errors at this point.  There is nothing we can do except
    * to try to keep going. Make sure it's not a silent error.
    */
   error = xfs_defer_finish(&tp, &dfops);
   if (error) {
     xfs_notice(mp, "%s: xfs_defer_finish returned error %d", __func__, error);
     xfs_defer_cancel(&dfops);
   }
   error = xfs_trans_commit(tp);
   if (error) xfs_notice(mp, "%s: xfs_trans_commit returned error %d", __func__, error);
   xfs_iunlock(ip, XFS_ILOCK_EXCL);
   return 0;
 }<sep>@@
expression e1,e2,tp;
@@
-		xfs_defer_init(e1, e2);
-		(tp)->t_dfops = e1;
+		xfs_defer_init(tp, e1, e2);
<|end_of_text|> 
 | 225 
	--- initial
+++ final
@@ -1,75 +1,74 @@
 int xfs_itruncate_extents_flags(struct xfs_trans **tpp, struct xfs_inode *ip, int whichfork, xfs_fsize_t new_size, int flags) {
   struct xfs_mount *mp = ip->i_mount;
   struct xfs_trans *tp = *tpp;
   struct xfs_defer_ops *odfops = tp->t_dfops;
   struct xfs_defer_ops dfops;
   xfs_fsblock_t first_block;
   xfs_fileoff_t first_unmap_block;
   xfs_fileoff_t last_block;
   xfs_filblks_t unmap_len;
   int error = 0;
   int done = 0;
   ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
   ASSERT(!atomic_read(&VFS_I(ip)->i_count) || xfs_isilocked(ip, XFS_IOLOCK_EXCL));
   ASSERT(new_size <= XFS_ISIZE(ip));
   ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
   ASSERT(ip->i_itemp != NULL);
   ASSERT(ip->i_itemp->ili_lock_flags == 0);
   ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
   trace_xfs_itruncate_extents_start(ip, new_size);
   flags |= xfs_bmapi_aflag(whichfork);
   /*
    * Since it is possible for space to become allocated beyond
    * the end of the file (in a crash where the space is allocated
    * but the inode size is not yet updated), simply remove any
    * blocks which show up between the new EOF and the maximum
    * possible file size.  If the first block to be removed is
    * beyond the maximum file size (ie it is the same as last_block),
    * then there is nothing to do.
    */
   first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
   last_block = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
   if (first_unmap_block == last_block) return 0;
   ASSERT(first_unmap_block < last_block);
   unmap_len = last_block - first_unmap_block + 1;
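  /* Unmap at most XFS_ITRUNC_MAX_EXTENTS extents per pass, then roll the transaction. */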
   while (!done) {
-    xfs_defer_init(&dfops, &first_block);
-    tp->t_dfops = &dfops;
+    xfs_defer_init(tp, &dfops, &first_block);
     error = xfs_bunmapi(tp, ip, first_unmap_block, unmap_len, flags, XFS_ITRUNC_MAX_EXTENTS, &first_block, &done);
     if (error) goto out_bmap_cancel;
     /*
      * Duplicate the transaction that has the permanent
      * reservation and commit the old transaction.
      */
     xfs_defer_ijoin(tp->t_dfops, ip);
     error = xfs_defer_finish(&tp, tp->t_dfops);
     if (error) goto out_bmap_cancel;
     error = xfs_trans_roll_inode(&tp, ip);
     if (error) goto out;
   }
   if (whichfork == XFS_DATA_FORK) {
     /* Remove all pending CoW reservations. */
     error = xfs_reflink_cancel_cow_blocks(ip, &tp, first_unmap_block, last_block, true);
     if (error) goto out;
     xfs_itruncate_clear_reflink_flags(ip);
   }
   /*
    * Always re-log the inode so that our permanent transaction can keep
    * on rolling it forward in the log.
    */
   xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
   trace_xfs_itruncate_extents_end(ip, new_size);
 out:
   /* ->t_dfops points to local stack, don't leak it! */
   tp->t_dfops = odfops;
   *tpp = tp;
   return error;
 out_bmap_cancel:
   /*
    * If the bunmapi call encounters an error, return to the caller where
    * the transaction can be properly aborted.  We just need to make sure
    * we're not holding any resources that we were not when we came in.
    */
   xfs_defer_cancel(tp->t_dfops);
   goto out;
 }<sep>@@
expression e1,e2,tp;
@@
-		xfs_defer_init(e1, e2);
-		(tp)->t_dfops = e1;
+		xfs_defer_init(tp, e1, e2);
<|end_of_text|> 
 | 226 
	--- initial
+++ final
@@ -1,69 +1,68 @@
 int xfs_link(xfs_inode_t *tdp, xfs_inode_t *sip, struct xfs_name *target_name) {
   xfs_mount_t *mp = tdp->i_mount;
   xfs_trans_t *tp;
   int error;
   struct xfs_defer_ops dfops;
   xfs_fsblock_t first_block;
   int resblks;
   trace_xfs_link(tdp, target_name);
   ASSERT(!S_ISDIR(VFS_I(sip)->i_mode));
   if (XFS_FORCED_SHUTDOWN(mp)) return -EIO;
   error = xfs_qm_dqattach(sip);
   if (error) goto std_return;
   error = xfs_qm_dqattach(tdp);
   if (error) goto std_return;
   resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
   error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, resblks, 0, 0, &tp);
   if (error == -ENOSPC) {
     resblks = 0;
     error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, 0, 0, 0, &tp);
   }
   if (error) goto std_return;
   xfs_lock_two_inodes(sip, XFS_ILOCK_EXCL, tdp, XFS_ILOCK_EXCL);
   xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
   xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);
   /*
    * If we are using project inheritance, we only allow hard link
    * creation in our tree when the project IDs are the same; else
    * the tree quota mechanism could be circumvented.
    */
   if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) && (xfs_get_projid(tdp) != xfs_get_projid(sip)))) {
     error = -EXDEV;
     goto error_return;
   }
   if (!resblks) {
     error = xfs_dir_canenter(tp, tdp, target_name);
     if (error) goto error_return;
   }
-  xfs_defer_init(&dfops, &first_block);
-  tp->t_dfops = &dfops;
+  xfs_defer_init(tp, &dfops, &first_block);
   /*
    * Handle initial link state of O_TMPFILE inode
    */
   if (VFS_I(sip)->i_nlink == 0) {
     error = xfs_iunlink_remove(tp, sip);
     if (error) goto error_return;
   }
   error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino, &first_block, resblks);
   if (error) goto error_return;
   xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
   xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);
   error = xfs_bumplink(tp, sip);
   if (error) goto error_return;
   /*
    * If this is a synchronous mount, make sure that the
    * link transaction goes to disk before returning to
    * the user.
    */
   if (mp->m_flags & (XFS_MOUNT_WSYNC | XFS_MOUNT_DIRSYNC)) xfs_trans_set_sync(tp);
   error = xfs_defer_finish(&tp, &dfops);
   if (error) {
     xfs_defer_cancel(&dfops);
     goto error_return;
   }
   return xfs_trans_commit(tp);
 error_return:
   xfs_trans_cancel(tp);
 std_return:
   return error;
 }<sep>@@
expression e1,e2,tp;
@@
-		xfs_defer_init(e1, e2);
-		(tp)->t_dfops = e1;
+		xfs_defer_init(tp, e1, e2);
<|end_of_text|> 
 | 227 
	--- initial
+++ final
@@ -1,93 +1,92 @@
 int xfs_remove(xfs_inode_t *dp, struct xfs_name *name, xfs_inode_t *ip) {
   xfs_mount_t *mp = dp->i_mount;
   xfs_trans_t *tp = NULL;
   int is_dir = S_ISDIR(VFS_I(ip)->i_mode);
   int error = 0;
   struct xfs_defer_ops dfops;
   xfs_fsblock_t first_block;
   uint resblks;
   trace_xfs_remove(dp, name);
   if (XFS_FORCED_SHUTDOWN(mp)) return -EIO;
   error = xfs_qm_dqattach(dp);
   if (error) goto std_return;
   error = xfs_qm_dqattach(ip);
   if (error) goto std_return;
   /*
    * We try to get the real space reservation first,
    * allowing for directory btree deletion(s) implying
    * possible bmap insert(s).  If we can't get the space
   * reservation then we use 0 instead, and avoid the bmap
   * btree insert(s) in the directory code: if a bmap insert
   * would otherwise be needed, the directory code trims the
   * LAST block from the directory instead.
    */
   resblks = XFS_REMOVE_SPACE_RES(mp);
   error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, resblks, 0, 0, &tp);
   if (error == -ENOSPC) {
     resblks = 0;
     error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, 0, 0, 0, &tp);
   }
   if (error) {
     ASSERT(error != -ENOSPC);
     goto std_return;
   }
   xfs_lock_two_inodes(dp, XFS_ILOCK_EXCL, ip, XFS_ILOCK_EXCL);
   xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
   xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
   /*
    * If we're removing a directory perform some additional validation.
    */
   if (is_dir) {
     ASSERT(VFS_I(ip)->i_nlink >= 2);
     if (VFS_I(ip)->i_nlink != 2) {
       error = -ENOTEMPTY;
       goto out_trans_cancel;
     }
     if (!xfs_dir_isempty(ip)) {
       error = -ENOTEMPTY;
       goto out_trans_cancel;
     }
     /* Drop the link from ip's "..".  */
     error = xfs_droplink(tp, dp);
     if (error) goto out_trans_cancel;
     /* Drop the "." link from ip to self.  */
     error = xfs_droplink(tp, ip);
     if (error) goto out_trans_cancel;
   } else {
     /*
      * When removing a non-directory we need to log the parent
      * inode here.  For a directory this is done implicitly
      * by the xfs_droplink call for the ".." entry.
      */
     xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
   }
   xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
   /* Drop the link from dp to ip. */
   error = xfs_droplink(tp, ip);
   if (error) goto out_trans_cancel;
-  xfs_defer_init(&dfops, &first_block);
-  tp->t_dfops = &dfops;
+  xfs_defer_init(tp, &dfops, &first_block);
   error = xfs_dir_removename(tp, dp, name, ip->i_ino, &first_block, resblks);
   if (error) {
     ASSERT(error != -ENOENT);
     goto out_bmap_cancel;
   }
   /*
    * If this is a synchronous mount, make sure that the
    * remove transaction goes to disk before returning to
    * the user.
    */
   if (mp->m_flags & (XFS_MOUNT_WSYNC | XFS_MOUNT_DIRSYNC)) xfs_trans_set_sync(tp);
   error = xfs_defer_finish(&tp, &dfops);
   if (error) goto out_bmap_cancel;
   error = xfs_trans_commit(tp);
   if (error) goto std_return;
   if (is_dir && xfs_inode_is_filestream(ip)) xfs_filestream_deassociate(ip);
   return 0;
 out_bmap_cancel:
   xfs_defer_cancel(&dfops);
 out_trans_cancel:
   xfs_trans_cancel(tp);
 std_return:
   return error;
 }<sep>@@
expression e1,e2,tp;
@@
-		xfs_defer_init(e1, e2);
-		(tp)->t_dfops = e1;
+		xfs_defer_init(tp, e1, e2);
<|end_of_text|> 
 | 228 
	--- initial
+++ final
@@ -1,124 +1,123 @@
 int xfs_iomap_write_allocate(xfs_inode_t *ip, int whichfork, xfs_off_t offset, xfs_bmbt_irec_t *imap) {
   xfs_mount_t *mp = ip->i_mount;
   xfs_fileoff_t offset_fsb, last_block;
   xfs_fileoff_t end_fsb, map_start_fsb;
   xfs_fsblock_t first_block;
   struct xfs_defer_ops dfops;
   xfs_filblks_t count_fsb;
   xfs_trans_t *tp;
   int nimaps;
   int error = 0;
   int flags = XFS_BMAPI_DELALLOC;
   int nres;
   if (whichfork == XFS_COW_FORK) flags |= XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC;
   /*
    * Make sure that the dquots are there.
    */
   error = xfs_qm_dqattach(ip);
   if (error) return error;
   offset_fsb = XFS_B_TO_FSBT(mp, offset);
   count_fsb = imap->br_blockcount;
   map_start_fsb = imap->br_startoff;
   XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));
   while (count_fsb != 0) {
     /*
      * Set up a transaction with which to allocate the
      * backing store for the file.  Do allocations in a
      * loop until we get some space in the range we are
      * interested in.  The other space that might be allocated
      * is in the delayed allocation extent on which we sit
      * but before our buffer starts.
      */
     nimaps = 0;
     while (nimaps == 0) {
       nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
       /*
        * We have already reserved space for the extent and any
        * indirect blocks when creating the delalloc extent,
       * so there is no need to reserve space in this transaction
        * again.
        */
       error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, XFS_TRANS_RESERVE, &tp);
       if (error) return error;
       xfs_ilock(ip, XFS_ILOCK_EXCL);
       xfs_trans_ijoin(tp, ip, 0);
-      xfs_defer_init(&dfops, &first_block);
-      tp->t_dfops = &dfops;
+      xfs_defer_init(tp, &dfops, &first_block);
       /*
        * it is possible that the extents have changed since
        * we did the read call as we dropped the ilock for a
        * while. We have to be careful about truncates or hole
       * punches here - we are not allowed to allocate
        * non-delalloc blocks here.
        *
       * The only protection against truncation is that the pages
        * for the range we are being asked to convert are
        * locked and hence a truncate will block on them
        * first.
        *
        * As a result, if we go beyond the range we really
       * need and hit a delalloc extent boundary followed by
        * a hole while we have excess blocks in the map, we
        * will fill the hole incorrectly and overrun the
        * transaction reservation.
        *
        * Using a single map prevents this as we are forced to
        * check each map we look for overlap with the desired
        * range and abort as soon as we find it. Also, given
        * that we only return a single map, having one beyond
        * what we can return is probably a bit silly.
        *
        * We also need to check that we don't go beyond EOF;
        * this is a truncate optimisation as a truncate sets
       * the new file size before blocking on the pages we
        * currently have locked under writeback. Because they
        * are about to be tossed, we don't need to write them
        * back....
        */
       nimaps = 1;
       end_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
       error = xfs_bmap_last_offset(ip, &last_block, XFS_DATA_FORK);
       if (error) goto trans_cancel;
       last_block = XFS_FILEOFF_MAX(last_block, end_fsb);
       if ((map_start_fsb + count_fsb) > last_block) {
         count_fsb = last_block - map_start_fsb;
         if (count_fsb == 0) {
           error = -EAGAIN;
           goto trans_cancel;
         }
       }
       /*
        * From this point onwards we overwrite the imap
        * pointer that the caller gave to us.
        */
       error = xfs_bmapi_write(tp, ip, map_start_fsb, count_fsb, flags, &first_block, nres, imap, &nimaps);
       if (error) goto trans_cancel;
       error = xfs_defer_finish(&tp, tp->t_dfops);
       if (error) goto trans_cancel;
       error = xfs_trans_commit(tp);
       if (error) goto error0;
       xfs_iunlock(ip, XFS_ILOCK_EXCL);
     }
     /*
      * See if we were able to allocate an extent that
      * covers at least part of the callers request
      */
     if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip))) return xfs_alert_fsblock_zero(ip, imap);
     if ((offset_fsb >= imap->br_startoff) && (offset_fsb < (imap->br_startoff + imap->br_blockcount))) {
       XFS_STATS_INC(mp, xs_xstrat_quick);
       return 0;
     }
     /*
      * So far we have not mapped the requested part of the
      * file, just surrounding data, try again.
      */
     count_fsb -= imap->br_blockcount;
     map_start_fsb = imap->br_startoff + imap->br_blockcount;
   }
 trans_cancel:
   xfs_defer_cancel(tp->t_dfops);
   xfs_trans_cancel(tp);
 error0:
   xfs_iunlock(ip, XFS_ILOCK_EXCL);
   return error;
 }<sep>@@
expression e1,e2,tp;
@@
-		xfs_defer_init(e1, e2);
-		(tp)->t_dfops = e1;
+		xfs_defer_init(tp, e1, e2);
<|end_of_text|> 
 | 230 
	--- initial
+++ final
@@ -1,118 +1,117 @@
 int xfs_iomap_write_direct(xfs_inode_t *ip, xfs_off_t offset, size_t count, xfs_bmbt_irec_t *imap, int nmaps) {
   xfs_mount_t *mp = ip->i_mount;
   xfs_fileoff_t offset_fsb;
   xfs_fileoff_t last_fsb;
   xfs_filblks_t count_fsb, resaligned;
   xfs_fsblock_t firstfsb;
   xfs_extlen_t extsz;
   int nimaps;
   int quota_flag;
   int rt;
   xfs_trans_t *tp;
   struct xfs_defer_ops dfops;
   uint qblocks, resblks, resrtextents;
   int error;
   int lockmode;
   int bmapi_flags = XFS_BMAPI_PREALLOC;
   uint tflags = 0;
   rt = XFS_IS_REALTIME_INODE(ip);
   extsz = xfs_get_extsz_hint(ip);
   lockmode = XFS_ILOCK_SHARED; /* locked by caller */
   ASSERT(xfs_isilocked(ip, lockmode));
   offset_fsb = XFS_B_TO_FSBT(mp, offset);
   last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
   if ((offset + count) > XFS_ISIZE(ip)) {
     /*
      * Assert that the in-core extent list is present since this can
      * call xfs_iread_extents() and we only have the ilock shared.
      * This should be safe because the lock was held around a bmapi
      * call in the caller and we only need it to access the in-core
      * list.
      */
     ASSERT(XFS_IFORK_PTR(ip, XFS_DATA_FORK)->if_flags & XFS_IFEXTENTS);
     error = xfs_iomap_eof_align_last_fsb(ip, extsz, &last_fsb);
     if (error) goto out_unlock;
   } else {
     if (nmaps && (imap->br_startblock == HOLESTARTBLOCK)) last_fsb = min(last_fsb, (xfs_fileoff_t)imap->br_blockcount + imap->br_startoff);
   }
   count_fsb = last_fsb - offset_fsb;
   ASSERT(count_fsb > 0);
   resaligned = xfs_aligned_fsb_count(offset_fsb, count_fsb, extsz);
   if (unlikely(rt)) {
     resrtextents = qblocks = resaligned;
     resrtextents /= mp->m_sb.sb_rextsize;
     resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
     quota_flag = XFS_QMOPT_RES_RTBLKS;
   } else {
     resrtextents = 0;
     resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
     quota_flag = XFS_QMOPT_RES_REGBLKS;
   }
   /*
    * Drop the shared lock acquired by the caller, attach the dquot if
    * necessary and move on to transaction setup.
    */
   xfs_iunlock(ip, lockmode);
   error = xfs_qm_dqattach(ip);
   if (error) return error;
   /*
    * For DAX, we do not allocate unwritten extents, but instead we zero
    * the block before we commit the transaction.  Ideally we'd like to do
    * this outside the transaction context, but if we commit and then crash
    * we may not have zeroed the blocks and this will be exposed on
    * recovery of the allocation. Hence we must zero before commit.
    *
    * Further, if we are mapping unwritten extents here, we need to zero
    * and convert them to written so that we don't need an unwritten extent
    * callback for DAX. This also means that we need to be able to dip into
    * the reserve block pool for bmbt block allocation if there is no space
    * left but we need to do unwritten extent conversion.
    */
   if (IS_DAX(VFS_I(ip))) {
     bmapi_flags = XFS_BMAPI_CONVERT | XFS_BMAPI_ZERO;
     if (imap->br_state == XFS_EXT_UNWRITTEN) {
       tflags |= XFS_TRANS_RESERVE;
       resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
     }
   }
   error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, resrtextents, tflags, &tp);
   if (error) return error;
   lockmode = XFS_ILOCK_EXCL;
   xfs_ilock(ip, lockmode);
   error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
   if (error) goto out_trans_cancel;
   xfs_trans_ijoin(tp, ip, 0);
   /*
    * From this point onwards we overwrite the imap pointer that the
    * caller gave to us.
    */
-  xfs_defer_init(&dfops, &firstfsb);
-  tp->t_dfops = &dfops;
+  xfs_defer_init(tp, &dfops, &firstfsb);
   nimaps = 1;
   error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb, bmapi_flags, &firstfsb, resblks, imap, &nimaps);
   if (error) goto out_bmap_cancel;
   /*
    * Complete the transaction
    */
   error = xfs_defer_finish(&tp, tp->t_dfops);
   if (error) goto out_bmap_cancel;
   error = xfs_trans_commit(tp);
   if (error) goto out_unlock;
   /*
   * Copy any maps to the caller's array and return any error.
    */
   if (nimaps == 0) {
     error = -ENOSPC;
     goto out_unlock;
   }
   if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip))) error = xfs_alert_fsblock_zero(ip, imap);
 out_unlock:
   xfs_iunlock(ip, lockmode);
   return error;
 out_bmap_cancel:
   xfs_defer_cancel(tp->t_dfops);
   xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
 out_trans_cancel:
   xfs_trans_cancel(tp);
   goto out_unlock;
 }<sep>@@
expression e1,e2,tp;
@@
-		xfs_defer_init(e1, e2);
-		(tp)->t_dfops = e1;
+		xfs_defer_init(tp, e1, e2);
<|end_of_text|> 
 | 231 
	--- initial
+++ final
@@ -1,88 +1,87 @@
 int xfs_iomap_write_unwritten(xfs_inode_t *ip, xfs_off_t offset, xfs_off_t count, bool update_isize) {
   xfs_mount_t *mp = ip->i_mount;
   xfs_fileoff_t offset_fsb;
   xfs_filblks_t count_fsb;
   xfs_filblks_t numblks_fsb;
   xfs_fsblock_t firstfsb;
   int nimaps;
   xfs_trans_t *tp;
   xfs_bmbt_irec_t imap;
   struct xfs_defer_ops dfops;
   struct inode *inode = VFS_I(ip);
   xfs_fsize_t i_size;
   uint resblks;
   int error;
   trace_xfs_unwritten_convert(ip, offset, count);
   offset_fsb = XFS_B_TO_FSBT(mp, offset);
   count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
   count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);
   /*
    * Reserve enough blocks in this transaction for two complete extent
    * btree splits.  We may be converting the middle part of an unwritten
    * extent and in this case we will insert two new extents in the btree
    * each of which could cause a full split.
    *
    * This reservation amount will be used in the first call to
    * xfs_bmbt_split() to select an AG with enough space to satisfy the
    * rest of the operation.
    */
   resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
   do {
     /*
      * Set up a transaction to convert the range of extents
      * from unwritten to real. Do allocations in a loop until
      * we have covered the range passed in.
      *
     * Note that we can't risk recursing back into the filesystem
      * here as we might be asked to write out the same inode that we
      * complete here and might deadlock on the iolock.
      */
     error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, XFS_TRANS_RESERVE | XFS_TRANS_NOFS, &tp);
     if (error) return error;
     xfs_ilock(ip, XFS_ILOCK_EXCL);
     xfs_trans_ijoin(tp, ip, 0);
     /*
      * Modify the unwritten extent state of the buffer.
      */
-    xfs_defer_init(&dfops, &firstfsb);
-    tp->t_dfops = &dfops;
+    xfs_defer_init(tp, &dfops, &firstfsb);
     nimaps = 1;
     error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb, XFS_BMAPI_CONVERT, &firstfsb, resblks, &imap, &nimaps);
     if (error) goto error_on_bmapi_transaction;
     /*
      * Log the updated inode size as we go.  We have to be careful
      * to only log it up to the actual write offset if it is
      * halfway into a block.
      */
     i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
     if (i_size > offset + count) i_size = offset + count;
     if (update_isize && i_size > i_size_read(inode)) i_size_write(inode, i_size);
     i_size = xfs_new_eof(ip, i_size);
     if (i_size) {
       ip->i_d.di_size = i_size;
       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
     }
     error = xfs_defer_finish(&tp, tp->t_dfops);
     if (error) goto error_on_bmapi_transaction;
     error = xfs_trans_commit(tp);
     xfs_iunlock(ip, XFS_ILOCK_EXCL);
     if (error) return error;
     if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip))) return xfs_alert_fsblock_zero(ip, &imap);
     if ((numblks_fsb = imap.br_blockcount) == 0) {
       /*
        * The numblks_fsb value should always get
        * smaller, otherwise the loop is stuck.
        */
       ASSERT(imap.br_blockcount);
       break;
     }
     offset_fsb += numblks_fsb;
     count_fsb -= numblks_fsb;
   } while (count_fsb > 0);
   return 0;
 error_on_bmapi_transaction:
   xfs_defer_cancel(tp->t_dfops);
   xfs_trans_cancel(tp);
   xfs_iunlock(ip, XFS_ILOCK_EXCL);
   return error;
 }<sep>@@
expression e1,e2,tp;
@@
-		xfs_defer_init(e1, e2);
-		(tp)->t_dfops = e1;
+		xfs_defer_init(tp, e1, e2);
<|end_of_text|> 
 | 232 
	--- initial
+++ final
@@ -1,59 +1,59 @@
 STATIC int xlog_recover_process_intents(struct xlog *log) {
   struct xfs_defer_ops dfops;
   struct xfs_ail_cursor cur;
   struct xfs_log_item *lip;
   struct xfs_ail *ailp;
   xfs_fsblock_t firstfsb;
   int error = 0;
 #if defined(DEBUG) || defined(XFS_WARN)
   xfs_lsn_t last_lsn;
 #endif
   ailp = log->l_ailp;
   spin_lock(&ailp->ail_lock);
   lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
 #if defined(DEBUG) || defined(XFS_WARN)
   last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block);
 #endif
-  xfs_defer_init(&dfops, &firstfsb);
+  xfs_defer_init(NULL, &dfops, &firstfsb);
   while (lip != NULL) {
     /*
      * We're done when we see something other than an intent.
      * There should be no intents left in the AIL now.
      */
     if (!xlog_item_is_intent(lip)) {
 #ifdef DEBUG
       for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
         ASSERT(!xlog_item_is_intent(lip));
 #endif
       break;
     }
     /*
     * We should never see a redo item with an LSN higher than
      * the last transaction we found in the log at the start
      * of recovery.
      */
     ASSERT(XFS_LSN_CMP(last_lsn, lip->li_lsn) >= 0);
     /*
      * NOTE: If your intent processing routine can create more
      * deferred ops, you /must/ attach them to the dfops in this
      * routine or else those subsequent intents will get
      * replayed in the wrong order!
      */
     switch (lip->li_type) {
     case XFS_LI_EFI: error = xlog_recover_process_efi(log->l_mp, ailp, lip); break;
     case XFS_LI_RUI: error = xlog_recover_process_rui(log->l_mp, ailp, lip); break;
     case XFS_LI_CUI: error = xlog_recover_process_cui(log->l_mp, ailp, lip, &dfops); break;
     case XFS_LI_BUI: error = xlog_recover_process_bui(log->l_mp, ailp, lip, &dfops); break;
     }
     if (error) goto out;
     lip = xfs_trans_ail_cursor_next(ailp, &cur);
   }
 out:
   xfs_trans_ail_cursor_done(&cur);
   spin_unlock(&ailp->ail_lock);
   if (error)
     xfs_defer_cancel(&dfops);
   else
     error = xlog_finish_defer_ops(log->l_mp, &dfops);
   return error;
 }<sep>@@
expression e1,e2,tp;
@@
-		xfs_defer_init(e1, e2);
+		xfs_defer_init(NULL, e1, e2);
<|end_of_text|> 
 | 233 
	--- initial
+++ final
@@ -1,82 +1,81 @@
 int xfs_refcount_recover_cow_leftovers(struct xfs_mount *mp, xfs_agnumber_t agno) {
   struct xfs_trans *tp;
   struct xfs_btree_cur *cur;
   struct xfs_buf *agbp;
   struct xfs_refcount_recovery *rr, *n;
   struct list_head debris;
   union xfs_btree_irec low;
   union xfs_btree_irec high;
   struct xfs_defer_ops dfops;
   xfs_fsblock_t fsb;
   xfs_agblock_t agbno;
   int error;
   if (mp->m_sb.sb_agblocks >= XFS_REFC_COW_START) return -EOPNOTSUPP;
   INIT_LIST_HEAD(&debris);
   /*
    * In this first part, we use an empty transaction to gather up
    * all the leftover CoW extents so that we can subsequently
    * delete them.  The empty transaction is used to avoid
    * a buffer lock deadlock if there happens to be a loop in the
    * refcountbt because we're allowed to re-grab a buffer that is
    * already attached to our transaction.  When we're done
    * recording the CoW debris we cancel the (empty) transaction
    * and everything goes away cleanly.
    */
   error = xfs_trans_alloc_empty(mp, &tp);
   if (error) return error;
   error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
   if (error) goto out_trans;
   if (!agbp) {
     error = -ENOMEM;
     goto out_trans;
   }
   cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno);
   /* Find all the leftover CoW staging extents. */
   memset(&low, 0, sizeof(low));
   memset(&high, 0, sizeof(high));
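   /* CoW staging extents are recorded above XFS_REFC_COW_START in the refcount btree. */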
   low.rc.rc_startblock = XFS_REFC_COW_START;
   high.rc.rc_startblock = -1U;
   error = xfs_btree_query_range(cur, &low, &high, xfs_refcount_recover_extent, &debris);
   if (error) goto out_cursor;
   xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
   xfs_trans_brelse(tp, agbp);
   xfs_trans_cancel(tp);
   /* Now iterate the list to free the leftovers */
   list_for_each_entry_safe(rr, n, &debris, rr_list) {
     /* Set up transaction. */
     error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
     if (error) goto out_free;
     trace_xfs_refcount_recover_extent(mp, agno, &rr->rr_rrec);
     /* Free the orphan record */
-    xfs_defer_init(&dfops, &fsb);
-    tp->t_dfops = &dfops;
+    xfs_defer_init(tp, &dfops, &fsb);
     agbno = rr->rr_rrec.rc_startblock - XFS_REFC_COW_START;
     fsb = XFS_AGB_TO_FSB(mp, agno, agbno);
     error = xfs_refcount_free_cow_extent(mp, tp->t_dfops, fsb, rr->rr_rrec.rc_blockcount);
     if (error) goto out_defer;
     /* Free the block. */
     xfs_bmap_add_free(mp, tp->t_dfops, fsb, rr->rr_rrec.rc_blockcount, NULL);
     error = xfs_defer_finish(&tp, tp->t_dfops);
     if (error) goto out_defer;
     error = xfs_trans_commit(tp);
     if (error) goto out_free;
     list_del(&rr->rr_list);
     kmem_free(rr);
   }
   return error;
 out_defer:
   xfs_defer_cancel(tp->t_dfops);
 out_trans:
   xfs_trans_cancel(tp);
 out_free:
   /* Free the leftover list */
   list_for_each_entry_safe(rr, n, &debris, rr_list) {
     list_del(&rr->rr_list);
     kmem_free(rr);
   }
   return error;
 out_cursor:
   xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
   xfs_trans_brelse(tp, agbp);
   goto out_trans;
 }<sep>@@
expression e1,e2,tp;
@@
-		xfs_defer_init(e1, e2);
-		(tp)->t_dfops = e1;
+		xfs_defer_init(tp, e1, e2);
<|end_of_text|> 
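This variant of the rule consumes two statements at once: the old two-argument xfs_defer_init() call and the hand-written attachment (tp)->t_dfops = e1; that immediately follows it, collapsing both into a single three-argument call. The same rule drives the xfs_reflink_* and xfs_growfs_rt_alloc conversions below. A self-contained sketch of that shape, again with stub types in place of the real XFS definitions:

/* Illustrative stubs only; not the real XFS declarations. */
struct xfs_defer_ops { int dummy; };
struct xfs_trans { struct xfs_defer_ops *t_dfops; };
typedef unsigned long long xfs_fsblock_t;

/* Assumed: the three-argument form performs the attachment that
 * callers previously did by hand via tp->t_dfops. */
static void xfs_defer_init(struct xfs_trans *tp,
                           struct xfs_defer_ops *dfops,
                           xfs_fsblock_t *firstfsb)
{
        (void)firstfsb;                 /* stub body */
        if (tp)
                tp->t_dfops = dfops;    /* attachment folded in */
}

static void caller_with_transaction(struct xfs_trans *tp)
{
        struct xfs_defer_ops dfops;
        xfs_fsblock_t fsb;

        /* before:
         *   xfs_defer_init(&dfops, &fsb);
         *   tp->t_dfops = &dfops;
         */
        xfs_defer_init(tp, &dfops, &fsb);
}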
| 234 |
	--- initial
+++ final
@@ -1,74 +1,73 @@
 int xfs_reflink_allocate_cow(struct xfs_inode *ip, struct xfs_bmbt_irec *imap, bool *shared, uint *lockmode) {
   struct xfs_mount *mp = ip->i_mount;
   xfs_fileoff_t offset_fsb = imap->br_startoff;
   xfs_filblks_t count_fsb = imap->br_blockcount;
   struct xfs_bmbt_irec got;
   struct xfs_defer_ops dfops;
   struct xfs_trans *tp = NULL;
   xfs_fsblock_t first_block;
   int nimaps, error = 0;
   bool trimmed;
   xfs_filblks_t resaligned;
   xfs_extlen_t resblks = 0;
   struct xfs_iext_cursor icur;
 retry:
   ASSERT(xfs_is_reflink_inode(ip));
   ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
   /*
    * Even if the extent is not shared we might have a preallocation for
    * it in the COW fork.  If so use it.
    */
   if (xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &got) && got.br_startoff <= offset_fsb) {
     *shared = true;
     /* If we have a real allocation in the COW fork we're done. */
     if (!isnullstartblock(got.br_startblock)) {
       xfs_trim_extent(&got, offset_fsb, count_fsb);
       *imap = got;
       goto convert;
     }
     xfs_trim_extent(imap, got.br_startoff, got.br_blockcount);
   } else {
     error = xfs_reflink_trim_around_shared(ip, imap, shared, &trimmed);
     if (error || !*shared) goto out;
   }
   if (!tp) {
     resaligned = xfs_aligned_fsb_count(imap->br_startoff, imap->br_blockcount, xfs_get_cowextsz_hint(ip));
     resblks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
     xfs_iunlock(ip, *lockmode);
     error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
     *lockmode = XFS_ILOCK_EXCL;
     xfs_ilock(ip, *lockmode);
     if (error) return error;
     error = xfs_qm_dqattach_locked(ip, false);
     if (error) goto out;
     goto retry;
   }
   error = xfs_trans_reserve_quota_nblks(tp, ip, resblks, 0, XFS_QMOPT_RES_REGBLKS);
   if (error) goto out;
   xfs_trans_ijoin(tp, ip, 0);
-  xfs_defer_init(&dfops, &first_block);
-  tp->t_dfops = &dfops;
+  xfs_defer_init(tp, &dfops, &first_block);
   nimaps = 1;
   /* Allocate the entire reservation as unwritten blocks. */
   error = xfs_bmapi_write(tp, ip, imap->br_startoff, imap->br_blockcount, XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC, &first_block, resblks, imap, &nimaps);
   if (error) goto out_bmap_cancel;
   xfs_inode_set_cowblocks_tag(ip);
   /* Finish up. */
   error = xfs_defer_finish(&tp, tp->t_dfops);
   if (error) goto out_bmap_cancel;
   error = xfs_trans_commit(tp);
   if (error) return error;
   /*
    * Allocation succeeded but the requested range was not even partially
    * satisfied?  Bail out!
    */
   if (nimaps == 0) return -ENOSPC;
 convert:
   return xfs_reflink_convert_cow_extent(ip, imap, offset_fsb, count_fsb);
 out_bmap_cancel:
   xfs_defer_cancel(tp->t_dfops);
   xfs_trans_unreserve_quota_nblks(tp, ip, (long)resblks, 0, XFS_QMOPT_RES_REGBLKS);
 out:
   if (tp) xfs_trans_cancel(tp);
   return error;
 }<sep>@@
expression e1,e2,tp;
@@
-		xfs_defer_init(e1, e2);
-		(tp)->t_dfops = e1;
+		xfs_defer_init(tp, e1, e2);
<|end_of_text|> 
| 235 |
	--- initial
+++ final
@@ -1,54 +1,53 @@
 int xfs_reflink_cancel_cow_blocks(struct xfs_inode *ip, struct xfs_trans **tpp, xfs_fileoff_t offset_fsb, xfs_fileoff_t end_fsb, bool cancel_real) {
   struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
   struct xfs_bmbt_irec got, del;
   struct xfs_iext_cursor icur;
   xfs_fsblock_t firstfsb;
   struct xfs_defer_ops dfops;
   struct xfs_defer_ops *odfops = (*tpp)->t_dfops;
   int error = 0;
   if (!xfs_is_reflink_inode(ip)) return 0;
   if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got)) return 0;
   /* Walk backwards until we're out of the I/O range... */
   while (got.br_startoff + got.br_blockcount > offset_fsb) {
     del = got;
     xfs_trim_extent(&del, offset_fsb, end_fsb - offset_fsb);
     /* Extent delete may have bumped ext forward */
     if (!del.br_blockcount) {
       xfs_iext_prev(ifp, &icur);
       goto next_extent;
     }
     trace_xfs_reflink_cancel_cow(ip, &del);
     if (isnullstartblock(del.br_startblock)) {
       error = xfs_bmap_del_extent_delay(ip, XFS_COW_FORK, &icur, &got, &del);
       if (error) break;
     } else if (del.br_state == XFS_EXT_UNWRITTEN || cancel_real) {
-      xfs_defer_init(&dfops, &firstfsb);
-      (*tpp)->t_dfops = &dfops;
+      xfs_defer_init(*tpp, &dfops, &firstfsb);
       /* Free the CoW orphan record. */
       error = xfs_refcount_free_cow_extent(ip->i_mount, (*tpp)->t_dfops, del.br_startblock, del.br_blockcount);
       if (error) break;
       xfs_bmap_add_free(ip->i_mount, (*tpp)->t_dfops, del.br_startblock, del.br_blockcount, NULL);
       /* Roll the transaction */
       xfs_defer_ijoin((*tpp)->t_dfops, ip);
       error = xfs_defer_finish(tpp, (*tpp)->t_dfops);
       if (error) {
         xfs_defer_cancel((*tpp)->t_dfops);
         break;
       }
       /* Remove the mapping from the CoW fork. */
       xfs_bmap_del_extent_cow(ip, &icur, &got, &del);
       /* Remove the quota reservation */
       error = xfs_trans_reserve_quota_nblks(NULL, ip, -(long)del.br_blockcount, 0, XFS_QMOPT_RES_REGBLKS);
       if (error) break;
     } else {
       /* Didn't do anything, push cursor back. */
       xfs_iext_prev(ifp, &icur);
     }
   next_extent:
     if (!xfs_iext_get_extent(ifp, &icur, &got)) break;
   }
   /* clear tag if cow fork is emptied */
   if (!ifp->if_bytes) xfs_inode_clear_cowblocks_tag(ip);
   (*tpp)->t_dfops = odfops;
   return error;
 }<sep>@@
expression e1,e2,tp;
@@
-		xfs_defer_init(e1, e2);
-		(tp)->t_dfops = e1;
+		xfs_defer_init(tp, e1, e2);
<|end_of_text|> 
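Note that the metavariables are expressions, so the rule also matches the double-pointer form above: (*tpp)->t_dfops = &dfops; binds tp to *tpp, and the rewrite correctly yields xfs_defer_init(*tpp, &dfops, &firstfsb);.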
| 236 |
	--- initial
+++ final
@@ -1,94 +1,93 @@
 int xfs_reflink_end_cow(struct xfs_inode *ip, xfs_off_t offset, xfs_off_t count) {
   struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
   struct xfs_bmbt_irec got, del;
   struct xfs_trans *tp;
   xfs_fileoff_t offset_fsb;
   xfs_fileoff_t end_fsb;
   xfs_fsblock_t firstfsb;
   struct xfs_defer_ops dfops;
   int error;
   unsigned int resblks;
   xfs_filblks_t rlen;
   struct xfs_iext_cursor icur;
   trace_xfs_reflink_end_cow(ip, offset, count);
   /* No COW extents?  That's easy! */
   if (ifp->if_bytes == 0) return 0;
   offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
   end_fsb = XFS_B_TO_FSB(ip->i_mount, offset + count);
   /*
    * Start a rolling transaction to switch the mappings.  We're
    * unlikely ever to have to remap 16T worth of single-block
    * extents, so just cap the worst case extent count to 2^32-1.
    * Stick a warning in just in case, and avoid 64-bit division.
    */
   BUILD_BUG_ON(MAX_RW_COUNT > UINT_MAX);
   if (end_fsb - offset_fsb > UINT_MAX) {
     error = -EFSCORRUPTED;
     xfs_force_shutdown(ip->i_mount, SHUTDOWN_CORRUPT_INCORE);
     ASSERT(0);
     goto out;
   }
   resblks = XFS_NEXTENTADD_SPACE_RES(ip->i_mount, (unsigned int)(end_fsb - offset_fsb), XFS_DATA_FORK);
   error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_write, resblks, 0, XFS_TRANS_RESERVE | XFS_TRANS_NOFS, &tp);
   if (error) goto out;
   xfs_ilock(ip, XFS_ILOCK_EXCL);
   xfs_trans_ijoin(tp, ip, 0);
   /*
   * In case of racing, overlapping AIO writes, no COW extents might be
    * left by the time I/O completes for the loser of the race.  In that
    * case we are done.
    */
   if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got)) goto out_cancel;
   /* Walk backwards until we're out of the I/O range... */
   while (got.br_startoff + got.br_blockcount > offset_fsb) {
     del = got;
     xfs_trim_extent(&del, offset_fsb, end_fsb - offset_fsb);
     /* Extent delete may have bumped ext forward */
     if (!del.br_blockcount) goto prev_extent;
     ASSERT(!isnullstartblock(got.br_startblock));
     /*
      * Don't remap unwritten extents; these are
      * speculatively preallocated CoW extents that have been
      * allocated but have not yet been involved in a write.
      */
     if (got.br_state == XFS_EXT_UNWRITTEN) goto prev_extent;
     /* Unmap the old blocks in the data fork. */
-    xfs_defer_init(&dfops, &firstfsb);
-    tp->t_dfops = &dfops;
+    xfs_defer_init(tp, &dfops, &firstfsb);
     rlen = del.br_blockcount;
     error = __xfs_bunmapi(tp, ip, del.br_startoff, &rlen, 0, 1, &firstfsb);
     if (error) goto out_defer;
     /* Trim the extent to whatever got unmapped. */
     if (rlen) { xfs_trim_extent(&del, del.br_startoff + rlen, del.br_blockcount - rlen); }
     trace_xfs_reflink_cow_remap(ip, &del);
     /* Free the CoW orphan record. */
     error = xfs_refcount_free_cow_extent(tp->t_mountp, tp->t_dfops, del.br_startblock, del.br_blockcount);
     if (error) goto out_defer;
     /* Map the new blocks into the data fork. */
     error = xfs_bmap_map_extent(tp->t_mountp, tp->t_dfops, ip, &del);
     if (error) goto out_defer;
     /* Charge this new data fork mapping to the on-disk quota. */
     xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_DELBCOUNT, (long)del.br_blockcount);
     /* Remove the mapping from the CoW fork. */
     xfs_bmap_del_extent_cow(ip, &icur, &got, &del);
     xfs_defer_ijoin(tp->t_dfops, ip);
     error = xfs_defer_finish(&tp, tp->t_dfops);
     if (error) goto out_defer;
     if (!xfs_iext_get_extent(ifp, &icur, &got)) break;
     continue;
   prev_extent:
     if (!xfs_iext_prev_extent(ifp, &icur, &got)) break;
   }
   error = xfs_trans_commit(tp);
   xfs_iunlock(ip, XFS_ILOCK_EXCL);
   if (error) goto out;
   return 0;
 out_defer:
   xfs_defer_cancel(tp->t_dfops);
 out_cancel:
   xfs_trans_cancel(tp);
   xfs_iunlock(ip, XFS_ILOCK_EXCL);
 out:
   trace_xfs_reflink_end_cow_error(ip, error, _RET_IP_);
   return error;
 }<sep>@@
expression e1,e2,tp;
@@
-		xfs_defer_init(e1, e2);
-		(tp)->t_dfops = e1;
+		xfs_defer_init(tp, e1, e2);
<|end_of_text|> 
| 237 |
	--- initial
+++ final
@@ -1,85 +1,84 @@
 STATIC int xfs_reflink_remap_extent(struct xfs_inode *ip, struct xfs_bmbt_irec *irec, xfs_fileoff_t destoff, xfs_off_t new_isize) {
   struct xfs_mount *mp = ip->i_mount;
   bool real_extent = xfs_bmap_is_real_extent(irec);
   struct xfs_trans *tp;
   xfs_fsblock_t firstfsb;
   unsigned int resblks;
   struct xfs_defer_ops dfops;
   struct xfs_bmbt_irec uirec;
   xfs_filblks_t rlen;
   xfs_filblks_t unmap_len;
   xfs_off_t newlen;
   int error;
   unmap_len = irec->br_startoff + irec->br_blockcount - destoff;
   trace_xfs_reflink_punch_range(ip, destoff, unmap_len);
   /* No reflinking if we're low on space */
   if (real_extent) {
     error = xfs_reflink_ag_has_free_space(mp, XFS_FSB_TO_AGNO(mp, irec->br_startblock));
     if (error) goto out;
   }
   /* Start a rolling transaction to switch the mappings */
   resblks = XFS_EXTENTADD_SPACE_RES(ip->i_mount, XFS_DATA_FORK);
   error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
   if (error) goto out;
   xfs_ilock(ip, XFS_ILOCK_EXCL);
   xfs_trans_ijoin(tp, ip, 0);
   /* If we're not just clearing space, then do we have enough quota? */
   if (real_extent) {
     error = xfs_trans_reserve_quota_nblks(tp, ip, irec->br_blockcount, 0, XFS_QMOPT_RES_REGBLKS);
     if (error) goto out_cancel;
   }
   trace_xfs_reflink_remap(ip, irec->br_startoff, irec->br_blockcount, irec->br_startblock);
   /* Unmap the old blocks in the data fork. */
   rlen = unmap_len;
   while (rlen) {
-    xfs_defer_init(&dfops, &firstfsb);
-    tp->t_dfops = &dfops;
+    xfs_defer_init(tp, &dfops, &firstfsb);
     error = __xfs_bunmapi(tp, ip, destoff, &rlen, 0, 1, &firstfsb);
     if (error) goto out_defer;
     /*
      * Trim the extent to whatever got unmapped.
      * Remember, bunmapi works backwards.
      */
     uirec.br_startblock = irec->br_startblock + rlen;
     uirec.br_startoff = irec->br_startoff + rlen;
     uirec.br_blockcount = unmap_len - rlen;
     unmap_len = rlen;
     /* If this isn't a real mapping, we're done. */
     if (!real_extent || uirec.br_blockcount == 0) goto next_extent;
     trace_xfs_reflink_remap(ip, uirec.br_startoff, uirec.br_blockcount, uirec.br_startblock);
     /* Update the refcount tree */
     error = xfs_refcount_increase_extent(mp, tp->t_dfops, &uirec);
     if (error) goto out_defer;
     /* Map the new blocks into the data fork. */
     error = xfs_bmap_map_extent(mp, tp->t_dfops, ip, &uirec);
     if (error) goto out_defer;
     /* Update quota accounting. */
     xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, uirec.br_blockcount);
     /* Update dest isize if needed. */
     newlen = XFS_FSB_TO_B(mp, uirec.br_startoff + uirec.br_blockcount);
     newlen = min_t(xfs_off_t, newlen, new_isize);
     if (newlen > i_size_read(VFS_I(ip))) {
       trace_xfs_reflink_update_inode_size(ip, newlen);
       i_size_write(VFS_I(ip), newlen);
       ip->i_d.di_size = newlen;
       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
     }
   next_extent:
     /* Process all the deferred stuff. */
     xfs_defer_ijoin(tp->t_dfops, ip);
     error = xfs_defer_finish(&tp, tp->t_dfops);
     if (error) goto out_defer;
   }
   error = xfs_trans_commit(tp);
   xfs_iunlock(ip, XFS_ILOCK_EXCL);
   if (error) goto out;
   return 0;
 out_defer:
   xfs_defer_cancel(tp->t_dfops);
 out_cancel:
   xfs_trans_cancel(tp);
   xfs_iunlock(ip, XFS_ILOCK_EXCL);
 out:
   trace_xfs_reflink_remap_extent_error(ip, error, _RET_IP_);
   return error;
 }<sep>@@
expression e1,e2,tp;
@@
-		xfs_defer_init(e1, e2);
-		(tp)->t_dfops = e1;
+		xfs_defer_init(tp, e1, e2);
<|end_of_text|> 
| 238 |
	--- initial
+++ final
@@ -1,91 +1,90 @@
 STATIC int xfs_growfs_rt_alloc(struct xfs_mount *mp, /* file system mount point */
                                xfs_extlen_t oblocks, /* old count of blocks */
                                xfs_extlen_t nblocks, /* new count of blocks */
                                struct xfs_inode *ip) /* inode (bitmap/summary) */
 {
   xfs_fileoff_t bno;          /* block number in file */
   struct xfs_buf *bp;         /* temporary buffer for zeroing */
   xfs_daddr_t d;              /* disk block address */
   int error;                  /* error return value */
   xfs_fsblock_t firstblock;   /* first block allocated in xaction */
   struct xfs_defer_ops dfops; /* list of freed blocks */
   xfs_fsblock_t fsbno;        /* filesystem block for bno */
   struct xfs_bmbt_irec map;   /* block map output */
   int nmap;                   /* number of block maps */
   int resblks;                /* space reservation */
   struct xfs_trans *tp;
   /*
    * Allocate space to the file, as necessary.
    */
   while (oblocks < nblocks) {
     resblks = XFS_GROWFSRT_SPACE_RES(mp, nblocks - oblocks);
     /*
      * Reserve space & log for one extent added to the file.
      */
     error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growrtalloc, resblks, 0, 0, &tp);
     if (error) return error;
     /*
      * Lock the inode.
      */
     xfs_ilock(ip, XFS_ILOCK_EXCL);
     xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-    xfs_defer_init(&dfops, &firstblock);
-    tp->t_dfops = &dfops;
+    xfs_defer_init(tp, &dfops, &firstblock);
     /*
      * Allocate blocks to the bitmap file.
      */
     nmap = 1;
     error = xfs_bmapi_write(tp, ip, oblocks, nblocks - oblocks, XFS_BMAPI_METADATA, &firstblock, resblks, &map, &nmap);
     if (!error && nmap < 1) error = -ENOSPC;
     if (error) goto out_bmap_cancel;
     /*
      * Free any blocks freed up in the transaction, then commit.
      */
     error = xfs_defer_finish(&tp, tp->t_dfops);
     if (error) goto out_bmap_cancel;
     error = xfs_trans_commit(tp);
     if (error) return error;
     /*
      * Now we need to clear the allocated blocks.
      * Do this one block per transaction, to keep it simple.
      */
     for (bno = map.br_startoff, fsbno = map.br_startblock; bno < map.br_startoff + map.br_blockcount; bno++, fsbno++) {
       /*
        * Reserve log for one block zeroing.
        */
       error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growrtzero, 0, 0, 0, &tp);
       if (error) return error;
       /*
        * Lock the bitmap inode.
        */
       xfs_ilock(ip, XFS_ILOCK_EXCL);
       xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
       /*
        * Get a buffer for the block.
        */
       d = XFS_FSB_TO_DADDR(mp, fsbno);
       bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize, 0);
       if (bp == NULL) {
         error = -EIO;
         goto out_trans_cancel;
       }
       memset(bp->b_addr, 0, mp->m_sb.sb_blocksize);
       xfs_trans_log_buf(tp, bp, 0, mp->m_sb.sb_blocksize - 1);
       /*
        * Commit the transaction.
        */
       error = xfs_trans_commit(tp);
       if (error) return error;
     }
     /*
      * Go on to the next extent, if any.
      */
     oblocks = map.br_startoff + map.br_blockcount;
   }
   return 0;
 out_bmap_cancel:
   xfs_defer_cancel(tp->t_dfops);
 out_trans_cancel:
   xfs_trans_cancel(tp);
   return error;
 }<sep>@@
expression e1,e2,tp;
@@
-		xfs_defer_init(e1, e2);
-		(tp)->t_dfops = e1;
+		xfs_defer_init(tp, e1, e2);
<|end_of_text|> 
| 239 |