Device ioctl BLOCK_FLUSH_CACHE: d_flush() is called on a device whenever a writable file is closed.
bdwrite(bp);
if (!bp->bf_busy)
bp->bf_dirty = false;
+ d_flush(bp->bf_dev);
}
}
}
return 0;
}
+/*
+ * Ask the block device driver to flush its cache by issuing the
+ * BLOCK_FLUSH_CACHE ioctl to the driver for this device.
+ *
+ * dev: packed major/minor device number; panics if it is not a
+ * valid device.
+ *
+ * Returns the driver's dev_ioctl result (presumably 0 on success,
+ * -1 on error per the usual driver convention — confirm against
+ * the driver table).
+ */
+int d_flush(uint16_t dev)
+{
+ if (!validdev(dev))
+ panic("d_flush: bad device");
+ return (*dev_tab[major(dev)].dev_ioctl) (minor(dev), BLOCK_FLUSH_CACHE, 0);
+}
+
/*
* No such device handler
*/
*/
#define SELECT_BEGIN 0x8000
#define SELECT_END 0x8001
+#define BLOCK_FLUSH_CACHE 0x8002
struct sysinfoblk {
uint8_t infosize; /* For expandability */
CODE1 int d_open(uint16_t dev, uint8_t flag);
CODE1 int d_close(uint16_t dev);
CODE1 int d_ioctl(uint16_t dev, uint16_t request, char *data);
+CODE1 int d_flush(uint16_t dev);
CODE1 int cdwrite(uint16_t dev, uint8_t flag);
CODE1 bool insq(struct s_queue *q, unsigned char c);
CODE1 bool remq(struct s_queue *q, unsigned char *cp);
{
int8_t oftindex;
inoptr ino;
+ uint16_t flush_dev = NO_DEVICE;
if (!(ino = getinode(uindex)))
return (-1);
if (ino->c_refs == 1 && of_tab[oftindex].o_refs == 1) {
if (isdevice(ino))
d_close((int) (ino->c_node.i_addr[0]));
+ if (getmode(ino) == F_REG && O_ACCMODE(of_tab[oftindex].o_access))
+ flush_dev = ino->c_dev;
#ifdef CONFIG_NET
- if (issocket(ino)
+ if (issocket(ino))
sock_close(ino);
#endif
}
udata.u_cloexec &= ~(1 << uindex);
oft_deref(oftindex);
+ /* if we closed a file in write mode, flush the device's cache after the inode has been dereferenced */
+ if(flush_dev != NO_DEVICE)
+ d_flush(flush_dev);
+
return (0);
}
if (ino->c_refs > 0 && (ino->c_flags & CDIRTY)) {
wr_inode(ino);
ino->c_flags &= ~CDIRTY;
+ /* WRS: also call d_flush(ino->c_dev) here? */
}
/* This now also indirectly does the superblocks as they