#	This is a shell archive.
#	Remove everything above and including the cut line.
#	Then run the rest of the file through sh.
#----cut here-----cut here-----cut here-----cut here----#
#!/bin/sh
# shar:    Shell Archiver
#	Run the following text with /bin/sh to create:
#	README
#	fcntl.patch
# This archive created: Tue Aug 18 14:16:48 1992
cat << \SHAR_EOF > README
README for fcntl.patch

This patch provides an almost complete implementation of the file locking
facilities provided by fcntl(). The only missing pieces are:

- deadlock detection
- mandatory locks

Neither of these has a very high priority with me, so I do not intend to
implement them soon.

This patch must be applied to Linux 0.97 Patch Level 1.

Apply it with:

cd /usr/src
patch -p0 <fcntl.patch 2>&1 | tee fcntlpatch.log
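
Once the patch is in and the kernel rebuilt, a small program like the sketch
below can exercise the new calls. This is only an illustration, not part of
the patch; the file name and region sizes are arbitrary.

/* locktest.c - trivial demo of fcntl() record locking. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	struct flock fl;
	int fd = open("testfile", O_RDWR | O_CREAT, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Write-lock the first 100 bytes; F_SETLKW sleeps if another
	   process already holds a conflicting lock. */
	fl.l_type = F_WRLCK;
	fl.l_whence = 0;	/* SEEK_SET: l_start is from start of file */
	fl.l_start = 0;
	fl.l_len = 100;
	if (fcntl(fd, F_SETLKW, &fl) < 0) {
		perror("fcntl(F_SETLKW)");
		return 1;
	}

	/* ... work on the locked region here ... */

	/* Release the lock. */
	fl.l_type = F_UNLCK;
	if (fcntl(fd, F_SETLK, &fl) < 0)
		perror("fcntl(F_UNLCK)");

	close(fd);
	return 0;
}

F_GETLK works with the same struct: if another process holds a conflicting
lock, the struct comes back describing it (l_whence is always reported as
SEEK_SET); otherwise l_type is set to F_UNLCK.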

Enjoy!

Doug Evans, dje@sspiff.UUCP or dje@sspiff.ampr.ab.ca, 92Aug18
SHAR_EOF
cat << \SHAR_EOF > fcntl.patch
*** linux.pl1/fs/Makefile	Sat Aug  1 15:44:01 1992
--- linux/fs/Makefile	Sat Aug  8 00:02:15 1992
***************
*** 7,13 ****
  #
  # Note 2! The CFLAGS definitions are now in the main makefile...
  
! SUBDIRS	=minix ext msdos
  
  .c.s:
  	$(CC) $(CFLAGS) -S $<
--- 7,13 ----
  #
  # Note 2! The CFLAGS definitions are now in the main makefile...
  
! SUBDIRS	=minix ext msdos # xenix
  
  .c.s:
  	$(CC) $(CFLAGS) -S $<
***************
*** 18,24 ****
  
  OBJS=	open.o read_write.o inode.o file_table.o buffer.o super.o \
  	block_dev.o stat.o exec.o pipe.o namei.o fcntl.o ioctl.o \
! 	select.o fifo.o
  
  all: fs.o fssubdirs
  
--- 18,24 ----
  
  OBJS=	open.o read_write.o inode.o file_table.o buffer.o super.o \
  	block_dev.o stat.o exec.o pipe.o namei.o fcntl.o ioctl.o \
! 	select.o fifo.o locks.o
  
  all: fs.o fssubdirs
  
*** linux.pl1/fs/fcntl.c	Tue Aug 18 12:51:57 1992
--- linux/fs/fcntl.c	Tue Aug 18 13:11:57 1992
***************
*** 14,19 ****
--- 14,21 ----
  #include <linux/string.h>
  
  extern int sys_close(int fd);
+ extern int fcntl_getlk(unsigned int, struct flock *);
+ extern int fcntl_setlk(unsigned int, unsigned int, struct flock *);
  
  static int dupfd(unsigned int fd, unsigned int arg)
  {
***************
*** 56,85 ****
  	if (fd >= NR_OPEN || !(filp = current->filp[fd]))
  		return -EBADF;
  	switch (cmd) {
! 		case F_DUPFD:
! 			return dupfd(fd,arg);
! 		case F_GETFD:
! 			return (current->close_on_exec>>fd)&1;
! 		case F_SETFD:
! 			if (arg&1)
! 				current->close_on_exec |= (1<<fd);
! 			else
! 				current->close_on_exec &= ~(1<<fd);
! 			return 0;
! 		case F_GETFL:
! 			return filp->f_flags;
! 		case F_SETFL:
! 			filp->f_flags &= ~(O_APPEND | O_NONBLOCK);
! 			filp->f_flags |= arg & (O_APPEND | O_NONBLOCK);
! 			return 0;
! 		case F_GETLK:	case F_SETLK:	case F_SETLKW:
! 			return -ENOSYS;
! 		default:
! 			/* sockets need a few special fcntls. */
! 			if (S_ISSOCK (filp->f_inode->i_mode))
! 			  {
! 			     return (sock_fcntl (filp, cmd, arg));
! 			  }
! 			return -EINVAL;
  	}
  }
--- 58,89 ----
  	if (fd >= NR_OPEN || !(filp = current->filp[fd]))
  		return -EBADF;
  	switch (cmd) {
! 	case F_DUPFD:
! 		return dupfd(fd,arg);
! 	case F_GETFD:
! 		return (current->close_on_exec>>fd)&1;
! 	case F_SETFD:
! 		if (arg&1)
! 			current->close_on_exec |= (1<<fd);
! 		else
! 			current->close_on_exec &= ~(1<<fd);
! 		return 0;
! 	case F_GETFL:
! 		return filp->f_flags;
! 	case F_SETFL:
! 		filp->f_flags &= ~(O_APPEND | O_NONBLOCK);
! 		filp->f_flags |= arg & (O_APPEND | O_NONBLOCK);
! 		return 0;
! 	case F_GETLK:
! 		return fcntl_getlk(fd, (struct flock *) arg);
! 	case F_SETLK:
! 		return fcntl_setlk(fd, cmd, (struct flock *) arg);
! 	case F_SETLKW:
! 		return fcntl_setlk(fd, cmd, (struct flock *) arg);
! 	default:
! 		/* sockets need a few special fcntls. */
! 		if (S_ISSOCK (filp->f_inode->i_mode))
! 		      return sock_fcntl (filp, cmd, arg);
! 		return -EINVAL;
  	}
  }
*** linux.pl1/fs/open.c	Tue Aug 18 12:51:57 1992
--- linux/fs/open.c	Mon Aug 17 13:31:22 1992
***************
*** 17,22 ****
--- 17,24 ----
  #include <linux/tty.h>
  #include <asm/segment.h>
  
+ extern void fcntl_remove_locks(struct task_struct *, struct file *);
+ 
  struct file_operations * chrdev_fops[MAX_CHRDEV] = {
  	NULL,
  };
***************
*** 339,347 ****
  }
  
  static int
! close_fp (struct file *filp)
! {
!    struct inode *inode;
  
  	if (filp->f_count == 0) {
  		printk("Close: file count is 0\n");
--- 341,349 ----
  }
  
  static int
! close_fp(struct file *filp)
! {	
! 	struct inode * inode;
  
  	if (filp->f_count == 0) {
  		printk("Close: file count is 0\n");
***************
*** 348,359 ****
  		return 0;
  	}
  
  	if (filp->f_count > 1) {
  		filp->f_count--;
  		return 0;
  	}
!      
! 	inode = filp->f_inode;
  	if (filp->f_op && filp->f_op->release)
  		filp->f_op->release(inode,filp);
  
--- 350,364 ----
  		return 0;
  	}
  
+ 	inode = filp->f_inode;
+ 	if (S_ISREG(inode->i_mode))
+ 		fcntl_remove_locks(current, filp);
+ 
  	if (filp->f_count > 1) {
  		filp->f_count--;
  		return 0;
  	}
! 
  	if (filp->f_op && filp->f_op->release)
  		filp->f_op->release(inode,filp);
  
*** linux.pl1/fs/locks.c	Tue Aug 11 14:27:34 1992
--- linux/fs/locks.c	Tue Aug 18 13:18:51 1992
***************
*** 0 ****
--- 1,451 ----
+ /*
+  *  linux/fs/locks.c
+  *
+  *  Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
+  *  Doug Evans, 92Aug07, dje@sspiff.UUCP.
+  *
+  * FIXME: two things aren't handled yet:
+  *	- deadlock detection/avoidance (of dubious merit, but since it's in
+  *	  the definition, I guess it should be provided eventually)
+  *	- mandatory locks (requires lots of changes elsewhere)
+  *
+  * FIXME: Should vector locking operations through table based on filesystem
+  *	type. NFS mounts obviously won't be able to use this code.
+  */
+ 
+ #include <asm/segment.h>
+ 
+ #include <linux/sched.h>
+ #include <linux/kernel.h>
+ #include <linux/errno.h>
+ #include <linux/stat.h>
+ #include <linux/fcntl.h>
+ 
+ #define OFFSET_MAX	0x7fffffff	/* FIXME: move elsewhere? */
+ 
+ static int copy_flock(struct file *filp, struct file_lock *fl, struct flock *l);
+ static int conflict(struct file_lock *caller_fl, struct file_lock *sys_fl);
+ static int overlap(struct file_lock *fl1, struct file_lock *fl2);
+ static int lock_it(struct file *filp, struct file_lock *caller);
+ static int unlock_it(struct file *filp, struct file_lock *caller);
+ static struct file_lock *alloc_lock(struct file *filp, struct file_lock *template);
+ static void free_lock(struct file *filp, struct file_lock *fl);
+ 
+ static struct file_lock file_lock_table[NR_FILE_LOCKS];
+ static struct file_lock *file_lock_free_list;
+ 
+ /*
+  * Called at boot time to initialize the lock table ...
+  */
+ 
+ void fcntl_init_locks(void)
+ {
+ 	struct file_lock *fl;
+ 
+ 	for (fl = &file_lock_table[0]; fl < file_lock_table + NR_FILE_LOCKS - 1; fl++) {
+ 		fl->fl_next = fl + 1;
+ 		fl->fl_owner = NULL;
+ 	}
+ 	file_lock_table[NR_FILE_LOCKS - 1].fl_next = NULL;
+ 	file_lock_table[NR_FILE_LOCKS - 1].fl_owner = NULL;
+ 	file_lock_free_list = &file_lock_table[0];
+ }
+ 
+ int fcntl_getlk(unsigned int fd, struct flock *l)
+ {	
+ 	struct flock flock;
+ 	struct file *filp;
+ 	struct file_lock *fl,file_lock;
+ 
+ 	if (fd >= NR_OPEN || !(filp = current->filp[fd]))
+ 		return -EBADF;
+ 	verify_area(l, sizeof(*l));
+ 	memcpy_fromfs(&flock, l, sizeof(flock));
+ 	if (flock.l_type == F_UNLCK)
+ 		return -EINVAL;
+ 	if (!copy_flock(filp, &file_lock, &flock))
+ 		return -EINVAL;
+ 
+ 	for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
+ 		if (conflict(&file_lock, fl)) {
+ 			flock.l_pid = fl->fl_owner->pid;
+ 			flock.l_start = fl->fl_start;
+ 			flock.l_len = fl->fl_end == OFFSET_MAX ? 0 :
+ 				fl->fl_end - fl->fl_start + 1;
+ 			/* Note: this will always be SEEK_SET. */
+ 			flock.l_whence = fl->fl_whence;
+ 			flock.l_type = fl->fl_type;
+ 			memcpy_tofs(l, &flock, sizeof(flock));
+ 			return 0;
+ 		}
+ 	}
+ 
+ 	flock.l_type = F_UNLCK;			/* no conflict found */
+ 	memcpy_tofs(l, &flock, sizeof(flock));
+ 	return 0;
+ }
+ 
+ /*
+  * This function implements both F_SETLK and F_SETLKW.
+  */
+ 
+ int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l)
+ {	
+ 	struct file *filp;
+ 	struct file_lock *fl,file_lock;
+ 	struct flock flock;
+ 
+ 	/*
+ 	 * Get arguments and validate them ...
+ 	 */
+ 
+ 	if (fd >= NR_OPEN || !(filp = current->filp[fd]))
+ 		return -EBADF;
+ 	verify_area(l, sizeof(*l));
+ 	memcpy_fromfs(&flock, l, sizeof(flock));
+ 	if (!copy_flock(filp, &file_lock, &flock))
+ 		return -EINVAL;
+ 	switch (file_lock.fl_type) {
+ 	case F_RDLCK :
+ 		if (!(filp->f_mode & 1))
+ 			return -EBADF;
+ 		break;
+ 	case F_WRLCK :
+ 		if (!(filp->f_mode & 2))
+ 			return -EBADF;
+ 		break;
+ 	case F_UNLCK :
+ 		break;
+ 	}
+ 
+ 	/*
+ 	 * F_UNLCK needs to be handled differently ...
+ 	 */
+ 
+ 	if (file_lock.fl_type == F_UNLCK)
+ 		return unlock_it(filp, &file_lock);
+ 
+ 	/*
+ 	 * Scan for a conflicting lock ...
+ 	 */
+ 
+ repeat:
+ 	for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
+ 		if (!conflict(&file_lock, fl))
+ 			continue;
+ 		/*
+ 		 * File is locked by another process. If this is F_SETLKW
+ 		 * wait for the lock to be released.
+ 		 * FIXME: We need to check for deadlocks here.
+ 		 */
+ 		if (cmd == F_SETLKW) {
+ 			interruptible_sleep_on(&fl->fl_wait);
+ 			goto repeat;
+ 		}
+ 		return -EAGAIN;
+ 	}
+ 
+ 	/*
+ 	 * Lock doesn't conflict with any other lock ...
+ 	 */
+ 
+ 	return lock_it(filp, &file_lock);
+ }
+ 
+ /*
+  * This function is called when the file is closed.
+  */
+ 
+ void fcntl_remove_locks(struct task_struct *task, struct file *filp)
+ {
+ 	struct file_lock *fl,*next;
+ 
+ 	for (fl = filp->f_inode->i_flock; fl != NULL; ) {
+ 		/*
+ 		 * If this one is freed, <fl_next> gets clobbered when the
+ 		 * entry is moved to the free list, so grab it now ...
+ 		 */
+ 		next = fl->fl_next;
+ 		if (fl->fl_owner == task)
+ 			free_lock(filp, fl);
+ 		fl = next;
+ 	}
+ }
+ 
+ /*
+  * Verify a "struct flock" and copy it to a "struct file_lock" ...
+  * Result is a boolean indicating success.
+  */
+ 
+ static int copy_flock(struct file *filp, struct file_lock *fl, struct flock *l)
+ {
+ 	off_t start;
+ 
+ 	if (!filp->f_inode)	/* just in case */
+ 		return 0;
+ 	if (!S_ISREG(filp->f_inode->i_mode))
+ 		return 0;
+ 	if (l->l_type != F_UNLCK && l->l_type != F_RDLCK && l->l_type != F_WRLCK)
+ 		return 0;
+ 	switch (l->l_whence) {
+ 	case 0 /*SEEK_SET*/ : start = 0; break;
+ 	case 1 /*SEEK_CUR*/ : start = filp->f_pos; break;
+ 	case 2 /*SEEK_END*/ : start = filp->f_inode->i_size; break;
+ 	default : return 0;
+ 	}
+ 	if ((start += l->l_start) < 0 || l->l_len < 0)
+ 		return 0;
+ 	fl->fl_type = l->l_type;
+ 	fl->fl_start = start;	/* we record the absolute position */
+ 	fl->fl_whence = 0;	/* FIXME: do we record <l_start> as passed? */
+ 	if (l->l_len == 0 || (fl->fl_end = start + l->l_len - 1) < 0)
+ 		fl->fl_end = OFFSET_MAX;
+ 	fl->fl_owner = current;
+ 	fl->fl_wait = NULL;		/* just for cleanliness */
+ 	return 1;
+ }
+ 
+ /*
+  * Determine if lock <sys_fl> blocks lock <caller_fl> ...
+  */
+ 
+ static int conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
+ {
+ 	if (caller_fl->fl_owner == sys_fl->fl_owner)
+ 		return 0;
+ 	if (!overlap(caller_fl, sys_fl))
+ 		return 0;
+ 	switch (caller_fl->fl_type) {
+ 	case F_RDLCK :
+ 		return sys_fl->fl_type != F_RDLCK;
+ 	case F_WRLCK :
+ 		return 1;	/* overlapping region not owned by caller */
+ 	}
+ 	return 0;	/* shouldn't get here, but just in case */
+ }
+ 
+ static int overlap(struct file_lock *fl1, struct file_lock *fl2)
+ {
+ 	if (fl1->fl_start <= fl2->fl_start) {
+ 		return fl1->fl_end >= fl2->fl_start;
+ 	} else {
+ 		return fl2->fl_end >= fl1->fl_start;
+ 	}
+ }
+ 
+ /*
+  * Add a lock to a file ...
+  * Result is 0 for success or -ENOLCK.
+  *
+  * We try to be real clever here and always minimize the number of table
+  * entries we use. For example we merge adjacent locks whenever possible. This
+  * consumes a bit of cpu and code space, is it really worth it? Beats me.
+  *
+  * I've tried to keep the following as small and simple as possible. If you can
+  * make it smaller or simpler, please do. /dje 92Aug11
+  *
+  * WARNING: We assume the lock doesn't conflict with any other lock.
+  */
+ 
+ static int lock_it(struct file *filp, struct file_lock *caller)
+ {
+ 	struct file_lock *fl,*new;
+ 
+ 	/*
+ 	 * It's easier if we allocate a slot for the lock first, and then
+ 	 * release it later if we have to (IE: if it can be merged with
+ 	 * another). This way the for() loop always knows that <caller> is an
+ 	 * existing entry. This will cause the routine to fail unnecessarily
+ 	 * in rare cases, but perfection can be pushed too far. :-)
+ 	 */
+ 
+ 	if ((caller = alloc_lock(filp, caller)) == NULL)
+ 		return -ENOLCK;
+ 
+ 	/*
+ 	 * First scan to see if we are changing/augmenting an existing lock ...
+ 	 */
+ 
+ 	for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
+ 		if (caller->fl_owner != fl->fl_owner)
+ 			continue;
+ 		if (caller == fl)
+ 			continue;
+ 		if (!overlap(caller, fl)) {
+ 			/*
+ 			 * Detect adjacent regions (if same lock type) ...
+ 			 */
+ 			if (caller->fl_type != fl->fl_type)
+ 				continue;
+ 			if (caller->fl_end + 1 == fl->fl_start) {
+ 				fl->fl_start = caller->fl_start;
+ 				free_lock(filp, caller);
+ 				caller = fl;
+ 				/* must continue, may overlap others now */
+ 			} else if (caller->fl_start - 1 == fl->fl_end) {
+ 				fl->fl_end = caller->fl_end;
+ 				free_lock(filp, caller);
+ 				caller = fl;
+ 				/* must continue, may overlap others now */
+ 			}
+ 			continue;
+ 		}
+ 		/*
+ 		 * We've found an overlapping region. Is it a change of lock
+ 		 * type, or are we changing the size of the locked space?
+ 		 */
+ 		if (caller->fl_type != fl->fl_type) {
+ 			if (caller->fl_start > fl->fl_start && caller->fl_end < fl->fl_end) {
+ 				/*
+ 				 * The new lock splits the old one in two ...
+ 				 * <fl> is the bottom piece, <caller> is the
+ 				 * new lock, and <new> is the top piece.
+ 				 */
+ 				if ((new = alloc_lock(filp, fl)) == NULL) {
+ 					free_lock(filp, caller);
+ 					return -ENOLCK;
+ 				}
+ 				fl->fl_end = caller->fl_start - 1;
+ 				new->fl_start = caller->fl_end + 1;
+ 				return 0;
+ 			}
+ 			if (caller->fl_start <= fl->fl_start && caller->fl_end >= fl->fl_end) {
+ 				/*
+ 				 * The new lock completely replaces old one ...
+ 				 */
+ 				free_lock(filp, fl);
+ 				return 0;
+ 			}
+ 			if (caller->fl_end < fl->fl_end) {
+ 				fl->fl_start = caller->fl_end + 1;
+ 				/* must continue, may be more overlaps */
+ 			} else if (caller->fl_start > fl->fl_start) {
+ 				fl->fl_end = caller->fl_start - 1;
+ 				/* must continue, may be more overlaps */
+ 			} else {
+ 				printk("lock_it: program bug: unanticipated overlap\n");
+ 				free_lock(filp, caller);
+ 				return -ENOLCK;
+ 			}
+ 		} else {	/* The new lock augments an existing lock ... */
+ 			int grew = 0;
+ 
+ 			if (caller->fl_start < fl->fl_start) {
+ 				fl->fl_start = caller->fl_start;
+ 				grew = 1;
+ 			}
+ 			if (caller->fl_end > fl->fl_end) {
+ 				fl->fl_end = caller->fl_end;
+ 				grew = 1;
+ 			}
+ 			free_lock(filp, caller);
+ 			caller = fl;
+ 			if (!grew)
+ 				return 0;
+ 			/* must continue, may be more overlaps */
+ 		}
+ 	}
+ 
+ 	/*
+ 	 * New lock doesn't overlap any regions ...
+ 	 * alloc_lock() has already been called, so we're done!
+ 	 */
+ 
+ 	return 0;
+ }
+ 
+ /*
+  * Handle F_UNLCK ...
+  * Result is 0 for success, or -EINVAL or -ENOLCK.
+  * ENOLCK can happen when a lock is split into two.
+  */
+ 
+ static int unlock_it(struct file *filp, struct file_lock *caller)
+ {
+ 	int one_unlocked = 0;
+ 	struct file_lock *fl,*next;
+ 
+ 	for (fl = filp->f_inode->i_flock; fl != NULL; ) {
+ 		if (caller->fl_owner != fl->fl_owner || !overlap(caller, fl)) {
+ 			fl = fl->fl_next;
+ 			continue;
+ 		}
+ 		one_unlocked = 1;
+ 		if (caller->fl_start > fl->fl_start && caller->fl_end < fl->fl_end) {
+ 			/*
+ 			 * Lock is split in two ...
+ 			 * <fl> is the bottom piece, <next> is the top piece.
+ 			 */
+ 			if ((next = alloc_lock(filp, fl)) == NULL)
+ 				return -ENOLCK;
+ 			fl->fl_end = caller->fl_start - 1;
+ 			next->fl_start = caller->fl_end + 1;
+ 			return 0;
+ 		}
+ 		if (caller->fl_start <= fl->fl_start && caller->fl_end >= fl->fl_end) {
+ 			/*
+ 			 * <fl_next> gets clobbered when the entry is moved to
+ 			 * the free list, so grab it now ...
+ 			 */
+ 			next = fl->fl_next;
+ 			free_lock(filp, fl);
+ 			fl = next;
+ 		}
+ 		if (caller->fl_start == fl->fl_start && caller->fl_end == fl->fl_end)
+ 			return 0;	/* no more to be found */
+ 		/* must continue, there may be more to unlock */
+ 	}
+ 
+ 	return one_unlocked ? 0 : -EINVAL;
+ }
+ 
+ static struct file_lock *alloc_lock(struct file *filp, struct file_lock *template)
+ {
+ 	struct file_lock *new;
+ 
+ 	if (file_lock_free_list == NULL)
+ 		return NULL;			/* no available entry */
+ 	if (file_lock_free_list->fl_owner != NULL)
+ 		panic("alloc_lock: broken free list\n");
+ 
+ 	new = file_lock_free_list;		/* remove from free list */
+ 	file_lock_free_list = file_lock_free_list->fl_next;
+ 
+ 	*new = *template;
+ 
+ 	new->fl_next = filp->f_inode->i_flock;	/* insert into file's list */
+ 	filp->f_inode->i_flock = new;
+ 
+ 	new->fl_owner = current;	/* FIXME: needed? */
+ 	new->fl_wait = NULL;
+ 	return new;
+ }
+ 
+ static void free_lock(struct file *filp, struct file_lock *fl)
+ {
+ 	struct file_lock **fl_p;
+ 
+ 	if (fl->fl_owner == NULL)	/* sanity check */
+ 		panic("free_lock: broken lock list\n");
+ 
+ 	/*
+ 	 * We only use a singly linked list to save some memory space
+ 	 * (the only place we'd use a doubly linked list is here).
+ 	 */
+ 
+ 	for (fl_p = &filp->f_inode->i_flock; *fl_p != NULL; fl_p = &(*fl_p)->fl_next) {
+ 		if (*fl_p == fl)
+ 			break;
+ 	}
+ 	if (*fl_p == NULL) {
+ 		printk("free_lock: lock is not in file's lock list\n");
+ 	} else {
+ 		*fl_p = (*fl_p)->fl_next;
+ 	}
+ 
+ 	fl->fl_next = file_lock_free_list;	/* add to free list */
+ 	file_lock_free_list = fl;
+ 	fl->fl_owner = NULL;			/* for sanity checks */
+ 
+ 	wake_up(&fl->fl_wait);
+ }
*** linux.pl1/fs/super.c	Sat Aug  1 13:09:35 1992
--- linux/fs/super.c	Mon Aug 17 13:54:39 1992
***************
*** 12,17 ****
--- 12,18 ----
  #include <linux/minix_fs.h>
  #include <linux/ext_fs.h>
  #include <linux/msdos_fs.h>
+ /*#include <linux/xenix_fs.h>*/
  #include <linux/kernel.h>
  #include <linux/stat.h>
  #include <linux/errno.h>
***************
*** 21,26 ****
--- 22,28 ----
  
  int sync_dev(int dev);
  void wait_for_keypress(void);
+ void fcntl_init_locks(void);
  
  /* set_bit uses setb, as gas doesn't recognize setc */
  #define set_bit(bitnr,addr) ({ \
***************
*** 35,42 ****
  /* Move into include file later */
  
  static struct file_system_type file_systems[] = {
  	{minix_read_super,"minix"},
! 	{ext_read_super,"ext"},
  	{msdos_read_super,"msdos"},
  	{NULL,NULL}
  };
--- 37,45 ----
  /* Move into include file later */
  
  static struct file_system_type file_systems[] = {
+ 	/*{xenix_read_super,"xenix"},*/
  	{minix_read_super,"minix"},
! 	/*{ext_read_super,"ext"},*/
  	{msdos_read_super,"msdos"},
  	{NULL,NULL}
  };
***************
*** 314,319 ****
--- 317,323 ----
  		panic("bad i-node size");
  	for(i=0;i<NR_FILE;i++)
  		file_table[i].f_count=0;
+ 	fcntl_init_locks();
  	if (MAJOR(ROOT_DEV) == 2) {
  		printk("Insert root floppy and press ENTER");
  		wait_for_keypress();
*** linux.pl1/include/linux/fcntl.h	Tue Aug 18 13:04:28 1992
--- linux/include/linux/fcntl.h	Tue Aug 18 13:02:31 1992
***************
*** 24,32 ****
  #define F_SETFD		2	/* set f_flags */
  #define F_GETFL		3	/* more flags (cloexec) */
  #define F_SETFL		4
! #define F_GETLK		5	/* not implemented */
! #define F_SETLK		6
! #define F_SETLKW	7
  
  #define F_SETOWN	8	/*  for sockets. */
  #define F_GETOWN	9	/*  for sockets. */
--- 24,32 ----
  #define F_SETFD		2	/* set f_flags */
  #define F_GETFL		3	/* more flags (cloexec) */
  #define F_SETFL		4
! #define F_GETLK		5	/* get conflicting lock */
! #define F_SETLK		6	/* set lock, don't wait */
! #define F_SETLKW	7	/* set lock, wait if necessary */
  
  #define F_SETOWN	8	/*  for sockets. */
  #define F_GETOWN	9	/*  for sockets. */
*** linux.pl1/include/linux/limits.h	Tue Aug 18 12:52:50 1992
--- linux/include/linux/limits.h	Mon Aug 17 13:34:52 1992
***************
*** 8,13 ****
--- 8,14 ----
  #define NR_FILE 128
  #define NR_SUPER 8
  #define NR_HASH 997
+ #define NR_FILE_LOCKS 32
  #define BLOCK_SIZE 1024
  #define BLOCK_SIZE_BITS 10
  #define MAX_CHRDEV 16
SHAR_EOF
#	End of shell archive
exit 0
