4700: #
4701: /*
4702:  */
4703: 
4704: #include "../param.h"
4705: #include "../user.h"
4706: #include "../buf.h"
4707: #include "../conf.h"
4708: #include "../systm.h"
4709: #include "../proc.h"
4710: #include "../seg.h"
4711: 
4712: /*
4713:  * This is the set of buffers proper, whose heads
4714:  * were declared in buf.h.  There can exist buffer
4715:  * headers not pointing here that are used purely
4716:  * as arguments to the I/O routines to describe
4717:  * I/O to be done-- e.g. swbuf, just below, for
4718:  * swapping.
4719:  */
4720: char    buffers[NBUF][514];     /* NBUF data areas; 514 = 512 data bytes + 2 extra (presumably slop -- TODO confirm) */
4721: struct  buf     swbuf;          /* dedicated header for swap I/O; see swap() below */
4722: 
4723: /*
4724:  * Declarations of the tables for the magtape devices;
4725:  * see bdwrite.
4726:  */
4727: int     tmtab;                  /* bdwrite compares a device's d_tab against these addresses */
4728: int     httab;                  /* to recognize the TM/HT magtapes (no delayed writes allowed) */
4729: 
4730: /*
4731:  * The following several routines allocate and free
4732:  * buffers with various side effects.  In general the
4733:  * arguments to an allocate routine are a device and
4734:  * a block number, and the value is a pointer to
4735:  * the buffer header; the buffer is marked "busy"
4736:  * so that no one else can touch it.  If the block was
4737:  * already in core, no I/O need be done; if it is
4738:  * already busy, the process waits until it becomes free.
4739:  * The following routines allocate a buffer:
4740:  *      getblk
4741:  *      bread
4742:  *      breada
4743:  * Eventually the buffer must be released, possibly with the
4744:  * side effect of writing it out, by using one of
4745:  *      bwrite
4746:  *      bdwrite
4747:  *      bawrite
4748:  *      brelse
4749:  */
4750: 
4751: /*
4752:  * Read in (if necessary) the block and return a buffer pointer.
 * The buffer comes back allocated to the caller (busy, via getblk).
 * Note: "=|" is the old C assignment operator, today's "|=".
4753:  */
4754: bread(dev, blkno)
4755: {
4756:         register struct buf *rbp;
4757: 
4758:         rbp = getblk(dev, blkno);
4759:         if (rbp->b_flags&B_DONE)
4760:                 return(rbp);            /* cache hit: data already valid, no I/O */
4761:         rbp->b_flags =| B_READ;
4762:         rbp->b_wcount = -256;           /* 256 words = 512 bytes; negative count for the device's word-count register */
4763:         (*bdevsw[dev.d_major].d_strategy)(rbp);
4764:         iowait(rbp);                    /* sleep until B_DONE; posts any error to u.u_error */
4765:         return(rbp);
4766: }
4767: /* ---------------------------       */
4768: 
4769: /*
4770:  * Read in the block, like bread, but also start I/O on the
4771:  * read-ahead block (which is not allocated to the caller)
4772:  */
4773: breada(adev, blkno, rablkno)
4774: {
4775:         register struct buf *rbp, *rabp;
4776:         register int dev;
4777: 
4778:         dev = adev;
4779:         rbp = 0;
4780:         if (!incore(dev, blkno)) {
4781:                 rbp = getblk(dev, blkno);
4782:                 if ((rbp->b_flags&B_DONE) == 0) {
4783:                         rbp->b_flags =| B_READ;
4784:                         rbp->b_wcount = -256;   /* 256 words = 512 bytes */
4785:                         (*bdevsw[adev.d_major].d_strategy)(rbp);
4786:                 }
4787:         }
4788:         if (rablkno && !incore(dev, rablkno)) {         /* rablkno==0 means no read-ahead wanted */
4789:                 rabp = getblk(dev, rablkno);
4790:                 if (rabp->b_flags & B_DONE)
4791:                         brelse(rabp);           /* already valid; nothing to start */
4792:                 else {
4793:                         rabp->b_flags =| B_READ|B_ASYNC;        /* async: iodone will brelse it */
4794:                         rabp->b_wcount = -256;
4795:                         (*bdevsw[adev.d_major].d_strategy)(rabp);
4796:                 }
4797:         }
4798:         if (rbp==0)
4799:                 return(bread(dev, blkno));      /* was in core when checked; let bread find (or re-read) it */
4800:         iowait(rbp);
4801:         return(rbp);
4802: }
4803: /* ---------------------------       */
4804: 
4805: /*
4806:  * Write the buffer, waiting for completion.
4807:  * Then release the buffer.
 * If the caller set B_ASYNC the write is started but not awaited,
 * and iodone will release the buffer at interrupt time.
4808:  */
4809: bwrite(bp)
4810: struct buf *bp;
4811: {
4812:         register struct buf *rbp;
4813:         register flag;
4814: 
4815:         rbp = bp;
4816:         flag = rbp->b_flags;            /* remember caller's flags before they are cleared */
4817:         rbp->b_flags =& ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
4818:         rbp->b_wcount = -256;           /* 256 words = 512 bytes */
4819:         (*bdevsw[rbp->b_dev.d_major].d_strategy)(rbp);
4820:         if ((flag&B_ASYNC) == 0) {
4821:                 iowait(rbp);            /* synchronous: wait for completion, then free */
4822:                 brelse(rbp);
4823:         } else if ((flag&B_DELWRI)==0)
4824:                 geterror(rbp);          /* async, not a delayed-write flush: can only report an already-posted error */
4825: }
4826: /* ---------------------------       */
4827: 
4828: /*
4829:  * Release the buffer, marking it so that if it is grabbed
4830:  * for another purpose it will be written out before being
4831:  * given up (e.g. when writing a partial block where it is
4832:  * assumed that another write for the same block will soon follow).
4833:  * This can't be done for magtape, since writes must be done
4834:  * in the same order as requested.
4835:  */
4836: bdwrite(bp)
4837: struct buf *bp;
4838: {
4839:         register struct buf *rbp;
4840:         register struct devtab *dp;
4841: 
4842:         rbp = bp;
4843:         dp = bdevsw[rbp->b_dev.d_major].d_tab;
4844:         if (dp == &tmtab || dp == &httab)       /* magtape: must write now, in order */
4845:                 bawrite(rbp);
4846:         else {
4847:                 rbp->b_flags =| B_DELWRI | B_DONE;      /* delayed write: getblk or bflush flushes it later */
4848:                 brelse(rbp);
4849:         }
4850: }
4851: /* ---------------------------       */
4852: 
4853: /*
4854:  * Release the buffer, start I/O on it, but don't wait for completion.
4855:  */
4856: bawrite(bp)
4857: struct buf *bp;
4858: {
4859:         register struct buf *rbp;
4860: 
4861:         rbp = bp;
4862:         rbp->b_flags =| B_ASYNC;        /* iodone will brelse it at interrupt time */
4863:         bwrite(rbp);
4864: }
4865: /* ---------------------------       */
4866: 
4867: /* release the buffer, with no I/O implied.
 * Wakes anyone sleeping on this buffer or on the free list, then
 * chains the buffer onto the tail of the free (av) list.  The free
 * list is kept in LRU order: getblk takes victims from the head.
4868:  */
4869: brelse(bp)
4870: struct buf *bp;
4871: {
4872:         register struct buf *rbp, **backp;
4873:         register int sps;
4874: 
4875:         rbp = bp;
4876:         if (rbp->b_flags&B_WANTED)
4877:                 wakeup(rbp);            /* someone slept on this buffer in getblk */
4878:         if (bfreelist.b_flags&B_WANTED) {
4879:                 bfreelist.b_flags =& ~B_WANTED;
4880:                 wakeup(&bfreelist);     /* someone slept waiting for any free buffer */
4881:         }
4882:         if (rbp->b_flags&B_ERROR)
4883:                 rbp->b_dev.d_minor = -1;  /* no assoc. on error */
4884:         backp = &bfreelist.av_back;
4885:         sps = PS->integ;                /* save processor priority */
4886:         spl6();                         /* the av list is also edited at interrupt level */
4887:         rbp->b_flags =& ~(B_WANTED|B_BUSY|B_ASYNC);
4888:         (*backp)->av_forw = rbp;
4889:         rbp->av_back = *backp;
4890:         *backp = rbp;                   /* insert at the tail of the free list */
4891:         rbp->av_forw = &bfreelist;
4892:         PS->integ = sps;                /* restore saved priority */
4893: }
4894: /* ---------------------------       */
4895: 
4896: /* See if the block is associated with some buffer
4897:  * (mainly to avoid getting hung up on a wait in breada)
 * Returns the buffer pointer, or 0 if not found.  No locking is
 * done, so the answer is only a hint: the buffer may be busy or
 * may be stolen before the caller acts on it.
4898:  */
4899: incore(adev, blkno)
4900: {
4901:         register int dev;
4902:         register struct buf *bp;
4903:         register struct devtab *dp;
4904: 
4905:         dev = adev;
4906:         dp = bdevsw[adev.d_major].d_tab;
4907:         for (bp=dp->b_forw; bp != dp; bp = bp->b_forw)  /* scan this device's buffer chain */
4908:                 if (bp->b_blkno==blkno && bp->b_dev==dev)
4909:                         return(bp);
4910:         return(0);
4911: }
4912: /* ---------------------------       */
4913: 
4914: /* Assign a buffer for the given block.  If the appropriate
4915:  * block is already associated, return it; otherwise search
4916:  * for the oldest non-busy buffer and reassign it.
4917:  * When a 512-byte area is wanted for some random reason
4918:  * (e.g. during exec, for the user arglist) getblk can be called
4919:  * with device NODEV to avoid unwanted associativity.
 * The buffer is returned busy.  After any sleep the whole search
 * restarts at "loop", since the lists may have changed.
4920:  */
4921: getblk(dev, blkno)
4922: {
4923:         register struct buf *bp;
4924:         register struct devtab *dp;
4925:         extern lbolt;                   /* declared but unused in this routine */
4926: 
4927:         if(dev.d_major >= nblkdev)
4928:                 panic("blkdev");
4929: 
4930:     loop:
4931:         if (dev < 0)
4932:                 dp = &bfreelist;        /* NODEV: skip the associative search entirely */
4933:         else {
4934:                 dp = bdevsw[dev.d_major].d_tab;
4935:                 if(dp == NULL)
4936:                         panic("devtab");
4937:                 for (bp=dp->b_forw; bp != dp; bp = bp->b_forw) {
4938:                         if (bp->b_blkno!=blkno || bp->b_dev!=dev)
4939:                                 continue;
4940:                         spl6();         /* flags are also changed at interrupt level */
4941:                         if (bp->b_flags&B_BUSY) {
4942:                                 bp->b_flags =| B_WANTED;
4943:                                 sleep(bp, PRIBIO);      /* brelse will wake us */
4944:                                 spl0();
4945:                                 goto loop;              /* world may have changed; rescan */
4946:                         }
4947:                         spl0();
4948:                         notavail(bp);   /* off the free list, marked busy */
4949:                         return(bp);
4950:                 }
4951:         }
4952:         spl6();
4953:         if (bfreelist.av_forw == &bfreelist) {  /* free list empty: wait for a release */
4954:                 bfreelist.b_flags =| B_WANTED;
4955:                 sleep(&bfreelist, PRIBIO);      /* brelse will wake us */
4956:                 spl0();
4957:                 goto loop;
4958:         }
4959:         spl0();
4960:         notavail(bp = bfreelist.av_forw);       /* take the oldest (LRU head) free buffer */
4961:         if (bp->b_flags & B_DELWRI) {
4962:                 bp->b_flags =| B_ASYNC;
4963:                 bwrite(bp);             /* flush the delayed write without waiting */
4964:                 goto loop;              /* iodone/brelse will return it to the free list */
4965:         }
4966:         bp->b_flags = B_BUSY | B_RELOC; /* NOTE(review): B_RELOC looks vestigial here -- confirm */
4967:         bp->b_back->b_forw = bp->b_forw;        /* unlink from its old device chain */
4968:         bp->b_forw->b_back = bp->b_back;
4969:         bp->b_forw = dp->b_forw;                /* link at the head of the new device chain */
4970:         bp->b_back = dp;
4971:         dp->b_forw->b_back = bp;
4972:         dp->b_forw = bp;
4973:         bp->b_dev = dev;
4974:         bp->b_blkno = blkno;
4975:         return(bp);
4976: }
4977: /* ---------------------------       */
4978: 
4979: /* Wait for I/O completion on the buffer; return errors
4980:  * to the user.
4981:  */
4982: iowait(bp)
4983: struct buf *bp;
4984: {
4985:         register struct buf *rbp;
4986: 
4987:         rbp = bp;
4988:         spl6();                         /* keep iodone's wakeup from slipping in between test and sleep */
4989:         while ((rbp->b_flags&B_DONE)==0)
4990:                 sleep(rbp, PRIBIO);
4991:         spl0();
4992:         geterror(rbp);                  /* post any device error to u.u_error */
4993: }
4994: /* ---------------------------       */
4995: 
4996: /* Unlink a buffer from the available list and mark it busy.
4997:  * (internal interface)
4998:  */
4999: notavail(bp)
5000: struct buf *bp;
5001: {
5002:         register struct buf *rbp;
5003:         register int sps;
5004: 
5005:         rbp = bp;
5006:         sps = PS->integ;                /* save processor priority */
5007:         spl6();                         /* the av list is also edited at interrupt level */
5008:         rbp->av_back->av_forw = rbp->av_forw;
5009:         rbp->av_forw->av_back = rbp->av_back;
5010:         rbp->b_flags =| B_BUSY;
5011:         PS->integ = sps;                /* restore saved priority */
5012: }
5013: /* ---------------------------       */
5014: 
5015: /* Mark I/O complete on a buffer, release it if I/O is asynchronous,
5016:  * and wake up anyone waiting for it.
 * Called from device interrupt handlers as well as from base level.
5017:  */
5018: iodone(bp)
5019: struct buf *bp;
5020: {
5021:         register struct buf *rbp;
5022: 
5023:         rbp = bp;
5024:         if(rbp->b_flags&B_MAP)
5025:                 mapfree(rbp);           /* give back the 11/70 UNIBUS map (see mapalloc) */
5026:         rbp->b_flags =| B_DONE;
5027:         if (rbp->b_flags&B_ASYNC)
5028:                 brelse(rbp);            /* nobody is waiting for an async buffer */
5029:         else {
5030:                 rbp->b_flags =& ~B_WANTED;
5031:                 wakeup(rbp);            /* wake iowait/getblk sleepers */
5032:         }
5033: }
5034: /* ---------------------------       */
5035: 
5036: /* Zero the core associated with a buffer.
 * (The parameter is declared int * but is used as a struct buf *;
 * pre-K&R C permitted member access through any word pointer.)
5037:  */
5038: clrbuf(bp)
5039: int *bp;
5040: {
5041:         register *p;
5042:         register c;
5043: 
5044:         p = bp->b_addr;
5045:         c = 256;                /* 256 words = 512 bytes, one block */
5046:         do
5047:                 *p++ = 0;
5048:         while (--c);
5049: }
5050: /* ---------------------------       */
5051: 
5052: /* Initialize the buffer I/O system by freeing
5053:  * all buffers and setting all device buffer lists to empty.
 * Also counts the configured block devices into nblkdev,
 * which getblk checks against.
5054:  */
5055: binit()
5056: {
5057:         register struct buf *bp;
5058:         register struct devtab *dp;
5059:         register int i;
5060:         struct bdevsw *bdp;
5061: 
5062:         bfreelist.b_forw = bfreelist.b_back =
5063:             bfreelist.av_forw = bfreelist.av_back = &bfreelist;     /* empty circular lists */
5064:         for (i=0; i<NBUF; i++) {
5065:                 bp = &buf[i];
5066:                 bp->b_dev = -1;         /* no block association yet */
5067:                 bp->b_addr = buffers[i];
5068:                 bp->b_back = &bfreelist;        /* chain onto the b list */
5069:                 bp->b_forw = bfreelist.b_forw;
5070:                 bfreelist.b_forw->b_back = bp;
5071:                 bfreelist.b_forw = bp;
5072:                 bp->b_flags = B_BUSY;
5073:                 brelse(bp);             /* clears busy and chains onto the av (free) list */
5074:         }
5075:         i = 0;
5076:         for (bdp = bdevsw; bdp->d_open; bdp++) {        /* table ends at a null d_open */
5077:                 dp = bdp->d_tab;
5078:                 if(dp) {
5079:                         dp->b_forw = dp;        /* empty per-device buffer chain */
5080:                         dp->b_back = dp;
5081:                 }
5082:                 i++;
5083:         }
5084:         nblkdev = i;
5085: }
5086: /* ---------------------------       */
5087: 
5088: /* Device start routine for disks
5089:  * and other devices that have the register
5090:  * layout of the older DEC controllers (RF, RK, RP, TM)
 * devloc points at the highest device register (the block-address
 * register); the remaining registers sit at decreasing addresses
 * and are filled in downwards.
5091:  */
5092: #define IENABLE 0100            /* interrupt-enable bit in the command register */
5093: #define WCOM    02              /* write command */
5094: #define RCOM    04              /* read command */
5095: #define GO      01              /* start the operation */
5096: devstart(bp, devloc, devblk, hbcom)
5097: struct buf *bp;
5098: int *devloc;
5099: {
5100:         register int *dp;
5101:         register struct buf *rbp;
5102:         register int com;
5103: 
5104:         dp = devloc;
5105:         rbp = bp;
5106:         *dp = devblk;                   /* block address */
5107:         *--dp = rbp->b_addr;            /* buffer address */
5108:         *--dp = rbp->b_wcount;          /* word count */
5109:         com = (hbcom<<8) | IENABLE | GO |
5110:                 ((rbp->b_xmem & 03) << 4);      /* extended-memory (high address) bits in 4-5 */
5111:         if (rbp->b_flags&B_READ)        /* command + x-mem */
5112:                 com =| RCOM;
5113:         else
5114:                 com =| WCOM;
5115:         *--dp = com;                    /* writing the command register (GO set) starts the transfer */
5116: }
5117: /* ---------------------------       */
5118: 
5119: /* startup routine for RH controllers.
 * Like devstart, but the RH keeps its x-mem bits at 8-9 of the
 * command register, and on the 11/70 the high address bits go
 * into a separate register (abae -- presumably the bus address
 * extension; TODO confirm against the RH70 handbook).
 */
5120: #define RHWCOM  060             /* RH write command */
5121: #define RHRCOM  070             /* RH read command */
5122: 
5123: rhstart(bp, devloc, devblk, abae)
5124: struct buf *bp;
5125: int *devloc, *abae;
5126: {
5127:         register int *dp;
5128:         register struct buf *rbp;
5129:         register int com;
5130: 
5131:         dp = devloc;
5132:         rbp = bp;
5133:         if(cputype == 70)
5134:                 *abae = rbp->b_xmem;    /* 11/70: extension register gets the high bits */
5135:         *dp = devblk;                   /* block address */
5136:         *--dp = rbp->b_addr;            /* buffer address */
5137:         *--dp = rbp->b_wcount;          /* word count */
5138:         com = IENABLE | GO |
5139:                 ((rbp->b_xmem & 03) << 8);      /* x-mem bits at 8-9 on the RH */
5140:         if (rbp->b_flags&B_READ)        /* command + x-mem */
5141:                 com =| RHRCOM; else
5142:                 com =| RHWCOM;
5143:         *--dp = com;
5144: }
5145: /* ---------------------------       */
5146: 
5147: /*
5148:  * 11/70 routine to allocate the
5149:  * UNIBUS map and initialize for
5150:  * a unibus device.
5151:  * The code here and in
5152:  * rhstart assumes that an rh on an 11/70
5153:  * is an rh70 and contains 22 bit addressing.
5154:  */
5155: int     maplock;                /* B_BUSY/B_WANTED lock over the single UNIBUS map */
5156: mapalloc(abp)
5157: struct buf *abp;
5158: {
5159:         register i, a;
5160:         register struct buf *bp;
5161: 
5162:         if(cputype != 70)
5163:                 return;                 /* only the 11/70 has a UNIBUS map */
5164:         spl6();
5165:         while(maplock&B_BUSY) {         /* one transfer at a time may own the map */
5166:                 maplock =| B_WANTED;
5167:                 sleep(&maplock, PSWP);
5168:         }
5169:         maplock =| B_BUSY;
5170:         spl0();
5171:         bp = abp;
5172:         bp->b_flags =| B_MAP;           /* iodone will call mapfree */
5173:         a = bp->b_xmem;
5174:         for(i=16; i<32; i=+2)           /* "=+" is old C for "+=" */
5175:                 UBMAP->r[i+1] = a;      /* high halves of one bank of map registers -- layout per UBMAP; TODO confirm */
5176:         for(a++; i<48; i=+2)
5177:                 UBMAP->r[i+1] = a;      /* next bank points one unit of xmem further */
5178:         bp->b_xmem = 1;                 /* transfer now addressed through the map */
5179: }
5180: /* ---------------------------       */
5181: 
/* Release the UNIBUS map lock taken by mapalloc; called from iodone
 * when B_MAP is set.  Wakes any sleeper before dropping the lock.
 */
5182: mapfree(bp)
5183: struct buf *bp;
5184: {
5185: 
5186:         bp->b_flags =& ~B_MAP;
5187:         if(maplock&B_WANTED)
5188:                 wakeup(&maplock);       /* let a mapalloc sleeper retry */
5189:         maplock = 0;                    /* clears B_BUSY (and B_WANTED) in one store */
5190: }
5191: /* ---------------------------       */
5192: 
5193: /*
5194:  * swap I/O
 * blkno: device block; coreaddr/count: physical core address and
 * length in 64-byte clicks; rdflg: B_READ for swap-in, 0 for
 * swap-out (presumably -- set by the caller; TODO confirm).
 * All swaps serialize on the single swbuf header.
 * Returns the B_ERROR bit (nonzero on device error).
5195:  */
5196: swap(blkno, coreaddr, count, rdflg)
5197: {
5198:         register int *fp;
5199: 
5200:         fp = &swbuf.b_flags;
5201:         spl6();
5202:         while (*fp&B_BUSY) {            /* wait for the single swap buffer */
5203:                 *fp =| B_WANTED;
5204:                 sleep(fp, PSWP);
5205:         }
5206:         *fp = B_BUSY | B_PHYS | rdflg;
5207:         swbuf.b_dev = swapdev;
5208:         swbuf.b_wcount = - (count<<5);  /* 32 w/block */
5209:         swbuf.b_blkno = blkno;
5210:         swbuf.b_addr = coreaddr<<6;     /* 64 b/block */
5211:         swbuf.b_xmem = (coreaddr>>10) & 077;    /* high-order physical address bits */
5212:         (*bdevsw[swapdev>>8].d_strategy)(&swbuf);       /* swapdev>>8 is the major number */
5213:         spl6();
5214:         while((*fp&B_DONE)==0)
5215:                 sleep(fp, PSWP);
5216:         if (*fp&B_WANTED)
5217:                 wakeup(fp);             /* let the next swapper claim swbuf */
5218:         spl0();
5219:         *fp =& ~(B_BUSY|B_WANTED);
5220:         return(*fp&B_ERROR);
5221: }
5222: /* ---------------------------       */
5223: 
5224: /*
5225:  * make sure all write-behind blocks
5226:  * on dev (or NODEV for all)
5227:  * are flushed out.
5228:  * (from umount and update)
5229:  */
5230: bflush(dev)
5231: {
5232:         register struct buf *bp;
5233: 
5234: loop:
5235:         spl6();                         /* the av list is edited at interrupt level too */
5236:         for (bp = bfreelist.av_forw; bp != &bfreelist; bp = bp->av_forw) {
5237: 
5238:                 if (bp->b_flags&B_DELWRI && (dev == NODEV||dev==bp->b_dev)) {
5239:                         bp->b_flags =| B_ASYNC;         /* don't wait; iodone will brelse */
5240:                         notavail(bp);
5241:                         bwrite(bp);
5242:                         goto loop;      /* the list changed under us; rescan from the top */
5243:                 }
5244:         }
5245:         spl0();
5246: }
5247: /* ---------------------------       */
5248: 
5249: /*
5250:  * Raw I/O. The arguments are
5251:  *      The strategy routine for the device
5252:  *      A buffer, which will always be a special buffer
5253:  *        header owned exclusively by the device for this purpose
5254:  *      The device number
5255:  *      Read/write flag
5256:  * Essentially all the work is computing physical addresses and
5257:  * validating them.
 * On a validation failure u.u_error is set to EFAULT and no I/O
 * is done.  The process is locked in core (SLOCK) for the duration
 * of the transfer since the device addresses physical memory.
5258:  */
5259: physio(strat, abp, dev, rw)
5260: struct buf *abp;
5261: int (*strat)();
5262: {
5263:         register struct buf *bp;
5264:         register char *base;
5265:         register int nb;
5266:         int ts;
5267: 
5268:         bp = abp;
5269:         base = u.u_base;                /* user virtual address of the transfer */
5270:         /*
5271:          * Check odd base, odd count, and address wraparound
5272:          */
5273:         if (base&01 || u.u_count&01 || base>=base+u.u_count)
5274:                 goto bad;
5275:         ts = (u.u_tsize+127) & ~0177;   /* text size rounded up to a 128-click (8K byte) segment */
5276:         if (u.u_sep)
5277:                 ts = 0;                 /* separate I/D: data space starts at virtual 0 */
5278:         nb = (base>>6) & 01777;         /* base converted to 64-byte clicks */
5279:         /*
5280:          * Check overlap with text. (ts and nb now
5281:          * in 64-byte clicks)
5282:          */
5283:         if (nb < ts)
5284:                 goto bad;
5285:         /*
5286:          * Check that transfer is either entirely in the
5287:          * data or in the stack: that is, either
5288:          * the end is in the data or the start is in the stack
5289:          * (remember wraparound was already checked).
5290:          */
5291:         if ((((base+u.u_count)>>6)&01777) >= ts+u.u_dsize
5292:             && nb < 1024-u.u_ssize)
5293:                 goto bad;
5294:         spl6();
5295:         while (bp->b_flags&B_BUSY) {    /* wait for the device's private header */
5296:                 bp->b_flags =| B_WANTED;
5297:                 sleep(bp, PRIBIO);
5298:         }
5299:         bp->b_flags = B_BUSY | B_PHYS | rw;
5300:         bp->b_dev = dev;
5301:         /*
5302:          * Compute physical address by simulating
5303:          * the segmentation hardware.
5304:          */
5305:         bp->b_addr = base&077;          /* byte offset within the click */
5306:         base = (u.u_sep? UDSA: UISA)->r[nb>>7] + (nb&0177);     /* physical click from the segment register */
5307:         bp->b_addr =+ base<<6;          /* "=+" is old C for "+=" */
5308:         bp->b_xmem = (base>>10) & 077;  /* high-order physical address bits */
5309:         bp->b_blkno = lshift(u.u_offset, -9);   /* file offset in 512-byte blocks */
5310:         bp->b_wcount = -((u.u_count>>1) & 077777);      /* negative word count for the controller */
5311:         bp->b_error = 0;
5312:         u.u_procp->p_flag =| SLOCK;     /* lock the process in core during the transfer */
5313:         (*strat)(bp);
5314:         spl6();
5315:         while ((bp->b_flags&B_DONE) == 0)
5316:                 sleep(bp, PRIBIO);
5317:         u.u_procp->p_flag =& ~SLOCK;
5318:         if (bp->b_flags&B_WANTED)
5319:                 wakeup(bp);
5320:         spl0();
5321:         bp->b_flags =& ~(B_BUSY|B_WANTED);
5322:         u.u_count = (-bp->b_resid)<<1;  /* bytes not transferred (b_resid presumably a negative word count -- TODO confirm) */
5323:         geterror(bp);
5324:         return;
5325:     bad:
5326:         u.u_error = EFAULT;
5327: }
5328: /* ---------------------------       */
5329: 
5330: /*
5331:  * Pick up the device's error number and pass it to the user;
5332:  * if there is an error but the number is 0 set a generalized
5333:  * code.  Actually the latter is always true because devices
5334:  * don't yet return specific errors.
5335:  */
5336: geterror(abp)
5337: struct buf *abp;
5338: {
5339:         register struct buf *bp;
5340: 
5341:         bp = abp;
5342:         if (bp->b_flags&B_ERROR)
5343:                 if ((u.u_error = bp->b_error)==0)
5344:                         u.u_error = EIO;        /* generic I/O error */
5345: }
5346: /* ---------------------------       */