·Initialize and release the device;
·Provide the various device services;
·Handle data exchange between the kernel and the device;
·Detect and handle errors that occur while the device is operating.
A Linux device driver is organized as a collection of functions that perform different tasks; through these functions, Linux makes operating a device feel like operating a file. To an application, a hardware device is simply a device file, and the application can manipulate it just as it manipulates an ordinary file, using open(), close(), read(), write(), and so on.
Linux divides devices into two main classes: character devices and block devices. A character device sends and receives data character by character, while a block device transfers data in whole buffers. When a read/write request is issued to a character device, the actual hardware I/O generally happens immediately; a block device, by contrast, uses a region of system memory as a buffer: if the buffered data can satisfy the user process's request, the requested data is returned, and otherwise the request function is invoked to perform the actual I/O. Block devices are aimed mainly at slow devices such as disks.
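To make the device-as-file idea concrete, here is a minimal user-space sketch; the device node /dev/mydev and the 64-byte read are illustrative assumptions, not part of the original text:
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
int main(void)
{
    char buf[64];
    /* /dev/mydev is a hypothetical device node created by some driver */
    int fd = open("/dev/mydev", O_RDWR);
    if (fd < 0) {
        perror("open");
        return 1;
    }
    /* read from the device exactly as from an ordinary file */
    if (read(fd, buf, sizeof(buf)) < 0)
        perror("read");
    close(fd);
    return 0;
}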
1. Memory allocation
Because a Linux driver runs in the kernel, a device driver that needs to allocate or free memory cannot use the user-level malloc/free functions; it must use the kernel-level functions kmalloc()/kfree() instead. The prototype of kmalloc() is:
void *kmalloc(size_t size, int priority);
The parameter size is the number of bytes to allocate; kmalloc can allocate at most 128KB. The parameter priority tells kmalloc() what the calling process should do if the memory cannot be allocated immediately:
GFP_KERNEL means wait, i.e., let kmalloc() free up memory (for example by paging some memory out to swap) to satisfy the request; GFP_ATOMIC means do not wait, returning 0 if the memory cannot be allocated immediately. The return value points to the start of the allocated memory; on failure it is 0 (the NULL pointer).
Memory allocated with kmalloc() must be released with kfree(), which is defined as:
#define kfree(n) kfree_s((n), 0)
where the prototype of kfree_s() is:
void kfree_s(void *ptr, int size);
The parameter ptr is the pointer to the allocated memory returned by kmalloc(), and size is the number of bytes to free; if size is 0, the kernel determines the block's size automatically.
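As a quick illustration of the allocate/use/free pattern just described, here is a minimal sketch; the mydev_* names and the 1KB size are assumptions for illustration:
#include <linux/slab.h>   /* kmalloc()/kfree() */
#include <linux/errno.h>

static char *mydev_buf;

static int mydev_alloc_buffer(void)
{
    /* GFP_KERNEL: may sleep until the kernel can satisfy the request */
    mydev_buf = kmalloc(1024, GFP_KERNEL);
    if (!mydev_buf)
        return -ENOMEM;   /* kmalloc returned 0: allocation failed */
    return 0;
}

static void mydev_free_buffer(void)
{
    kfree(mydev_buf);     /* release the memory back to the kernel */
    mydev_buf = NULL;
}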
2. Interrupts
Many devices involve interrupt operations, so the driver for such a device must provide an interrupt service routine for the interrupt requests generated by the hardware. Just as with registering the basic entry points, the driver asks the kernel to associate a particular interrupt request with its interrupt service routine. In Linux, this request is made with the request_irq() function:
int request_irq(unsigned int irq, void (*handler)(int), unsigned long type, char *name);
The parameter irq is the interrupt request number; handler is a pointer to the interrupt service routine; type selects a normal or a fast interrupt (with a normal interrupt, after the service routine returns the kernel may run the scheduler to decide which process runs next; with a fast interrupt, the interrupted program resumes immediately after the service routine returns; type is 0 for a normal interrupt and SA_INTERRUPT for a fast one); name is the name of the device driver.
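A minimal sketch of hooking an interrupt through the old-style request_irq() interface quoted above; the IRQ number 5, the handler body, and the name "mydev" are illustrative assumptions:
#define MYDEV_IRQ 5   /* hypothetical interrupt line used by our device */

/* The interrupt service routine: the kernel calls it with the IRQ number */
static void mydev_interrupt(int irq)
{
    /* acknowledge the device and move the data; keep the work short */
}

static int mydev_setup_irq(void)
{
    /* type 0 requests a normal interrupt; SA_INTERRUPT would request a fast one.
     * request_irq() returns 0 on success. */
    return request_irq(MYDEV_IRQ, mydev_interrupt, 0, "mydev");
}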
4. Block device drivers
Writing a block device driver is a substantial undertaking, far more difficult than writing a character device driver: a simple block device often takes a thousand or more lines of code, whereas a character device may need only a few dozen. Considerable groundwork is therefore required. Below we first present an example, the mtdblock block device driver, and explain how a block device driver is written by analyzing its code (for reasons of space, large amounts of code have been omitted and only the essential skeleton is kept):
#include <linux/config.h>
#include <linux/devfs_fs_kernel.h>
static void mtd_notify_add(struct mtd_info* mtd);
static void mtd_notify_remove(struct mtd_info* mtd);
static struct mtd_notifier notifier = {
mtd_notify_add,
mtd_notify_remove,
NULL
};
static devfs_handle_t devfs_dir_handle = NULL;
static devfs_handle_t devfs_rw_handle[MAX_MTD_DEVICES];
static struct mtdblk_dev {
struct mtd_info *mtd; /* Locked */
int count;
struct semaphore cache_sem;
unsigned char *cache_data;
unsigned long cache_offset;
unsigned int cache_size;
enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state;
} *mtdblks[MAX_MTD_DEVICES];
static spinlock_t mtdblks_lock;
/* this lock is used just in kernels >= 2.5.x */
static spinlock_t mtdblock_lock;
static int mtd_sizes[MAX_MTD_DEVICES];
static int mtd_blksizes[MAX_MTD_DEVICES];
static void erase_callback(struct erase_info *done)
{
wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv;
wake_up(wait_q);
}
static int erase_write (struct mtd_info *mtd, unsigned long pos,
int len, const char *buf)
{
struct erase_info erase;
DECLARE_WAITQUEUE(wait, current);
wait_queue_head_t wait_q;
size_t retlen;
int ret;
/*
* First, let's erase the flash block.
*/
init_waitqueue_head(&wait_q);
erase.mtd = mtd;
erase.callback = erase_callback;
erase.addr = pos;
erase.len = len;
erase.priv = (u_long)&wait_q;
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&wait_q, &wait);
ret = MTD_ERASE(mtd, &erase);
if (ret) {
set_current_state(TASK_RUNNING);
remove_wait_queue(&wait_q, &wait);
printk (KERN_WARNING "mtdblock: erase of region [0x%lx, 0x%x] " "on \"%s\" failed\n",
pos, len, mtd->name);
return ret;
}
schedule(); /* Wait for erase to finish. */
remove_wait_queue(&wait_q, &wait);
/*
* Next, write the data to flash.
*/
ret = MTD_WRITE (mtd, pos, len, &retlen, buf);
if (ret)
return ret;
if (retlen != len)
return -EIO;
return 0;
}
static int write_cached_data (struct mtdblk_dev *mtdblk)
{
struct mtd_info *mtd = mtdblk->mtd;
int ret;
if (mtdblk->cache_state != STATE_DIRTY)
return 0;
DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: writing cached data for \"%s\" "
"at 0x%lx, size 0x%x\n", mtd->name,
mtdblk->cache_offset, mtdblk->cache_size);
ret = erase_write (mtd, mtdblk->cache_offset,
mtdblk->cache_size, mtdblk->cache_data);
if (ret)
return ret;
mtdblk->cache_state = STATE_EMPTY;
return 0;
}
static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
int len, const char *buf)
{
…
}
static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
int len, char *buf)
{
…
}
static int mtdblock_open(struct inode *inode, struct file *file)
{
…
}
static release_t mtdblock_release(struct inode *inode, struct file *file)
{
int dev;
struct mtdblk_dev *mtdblk;
DEBUG(MTD_DEBUG_LEVEL1, "mtdBLOCK_release/n");
if (inode == NULL)
release_return(-ENODEV);
dev = minor(inode->i_rdev);
mtdblk = mtdblks[dev];
down(&mtdblk->cache_sem);
write_cached_data(mtdblk);
up(&mtdblk->cache_sem);
spin_lock(&mtdblks_lock);
if (!--mtdblk->count) {
/* It was the last usage. Free the device */
mtdblks[dev] = NULL;
spin_unlock(&mtdblks_lock);
if (mtdblk->mtd->sync)
mtdblk->mtd->sync(mtdblk->mtd);
put_mtd_device(mtdblk->mtd);
vfree(mtdblk->cache_data);
kfree(mtdblk);
} else {
spin_unlock(&mtdblks_lock);
}
DEBUG(MTD_DEBUG_LEVEL1, "ok/n");
BLK_DEC_USE_COUNT;
release_return(0);
}
/*
* This is a special request_fn because it is executed in a process context
* to be able to sleep independently of the caller. The
* io_request_lock (for <2.5) or queue_lock (for >=2.5) is held upon entry
* and exit. The head of our request queue is considered active so there is
* no need to dequeue requests before we are done.
*/
static void handle_mtdblock_request(void)
{
struct request *req;
struct mtdblk_dev *mtdblk;
unsigned int res;
for (;;) {
INIT_REQUEST;
req = CURRENT;
spin_unlock_irq(QUEUE_LOCK(QUEUE));
mtdblk = mtdblks[minor(req->rq_dev)];
res = 0;
if (minor(req->rq_dev) >= MAX_MTD_DEVICES)
panic("%s : minor out of bound", __FUNCTION__);
if (!IS_REQ_CMD(req))
goto end_req;
if ((req->sector + req->current_nr_sectors) > (mtdblk->mtd->size >> 9))
goto end_req;
// Handle the request
switch (rq_data_dir(req))
{
int err;
case READ:
down(&mtdblk->cache_sem);
err = do_cached_read (mtdblk, req->sector << 9,
req->current_nr_sectors << 9,
req->buffer);
up(&mtdblk->cache_sem);
if (!err)
res = 1;
break;
case WRITE:
// Read-only device
if ( !(mtdblk->mtd->flags & MTD_WRITEABLE) )
break;
// Do the write
down(&mtdblk->cache_sem);
err = do_cached_write (mtdblk, req->sector << 9, req->current_nr_sectors << 9, req->buffer);
up(&mtdblk->cache_sem);
if (!err)
res = 1;
break;
}
end_req:
spin_lock_irq(QUEUE_LOCK(QUEUE));
end_request(res);
}
}
static volatile int leaving = 0;
static DECLARE_MUTEX_LOCKED(thread_sem);
static DECLARE_WAIT_QUEUE_HEAD(thr_wq);
int mtdblock_thread(void *dummy)
{
…
}
#define RQFUNC_ARG request_queue_t *q
static void mtdblock_request(RQFUNC_ARG)
{
/* Don't do anything, except wake the thread if necessary */
wake_up(&thr_wq);
}
static int mtdblock_ioctl(struct inode * inode, struct file * file,
unsigned int cmd, unsigned long arg)
{
struct mtdblk_dev *mtdblk;
mtdblk = mtdblks[minor(inode->i_rdev)];
switch (cmd) {
case BLKGETSIZE: /* Return device size */
return put_user((mtdblk->mtd->size >> 9), (unsigned long *) arg);
case BLKFLSBUF:
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
fsync_dev(inode->i_rdev);
invalidate_buffers(inode->i_rdev);
down(&mtdblk->cache_sem);
write_cached_data(mtdblk);
up(&mtdblk->cache_sem);
if (mtdblk->mtd->sync)
mtdblk->mtd->sync(mtdblk->mtd);
return 0;
default:
return -EINVAL;
}
}
static struct block_device_operations mtd_fops =
{
owner: THIS_MODULE,
open: mtdblock_open,
release: mtdblock_release,
ioctl: mtdblock_ioctl
};
static void mtd_notify_add(struct mtd_info* mtd)
{
…
}
static void mtd_notify_remove(struct mtd_info* mtd)
{
if (!mtd || mtd->type == MTD_ABSENT)
return;
devfs_unregister(devfs_rw_handle[mtd->index]);
}
int __init init_mtdblock(void)
{
int i;
spin_lock_init(&mtdblks_lock);
/* this lock is used just in kernels >= 2.5.x */
spin_lock_init(&mtdblock_lock);
#ifdef CONFIG_DEVFS_FS
if (devfs_register_blkdev(MTD_BLOCK_MAJOR, DEVICE_NAME, &mtd_fops))
{
printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
MTD_BLOCK_MAJOR);
return -EAGAIN;
}
devfs_dir_handle = devfs_mk_dir(NULL, DEVICE_NAME, NULL);
register_mtd_user(&notifier);
#else
if (register_blkdev(MAJOR_NR,DEVICE_NAME,&mtd_fops)) {
printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
MTD_BLOCK_MAJOR);
return -EAGAIN;
}
#endif
/* We fill it in at open() time. */
for (i=0; i< MAX_MTD_DEVICES; i++) {
mtd_sizes[i] = 0;
mtd_blksizes[i] = BLOCK_SIZE;
}
init_waitqueue_head(&thr_wq);
/* Allow the block size to default to BLOCK_SIZE. */
blksize_size[MAJOR_NR] = mtd_blksizes;
blk_size[MAJOR_NR] = mtd_sizes;
BLK_INIT_QUEUE(BLK_DEFAULT_QUEUE(MAJOR_NR), &mtdblock_request, &mtdblock_lock);
kernel_thread (mtdblock_thread, NULL, CLONE_FS|CLONE_FILES|CLONE_SIGHAND);
return 0;
}
static void __exit cleanup_mtdblock(void)
{
leaving = 1;
wake_up(&thr_wq);
down(&thread_sem);
#ifdef CONFIG_DEVFS_FS
unregister_mtd_user(&notifier);
devfs_unregister(devfs_dir_handle);
devfs_unregister_blkdev(MTD_BLOCK_MAJOR, DEVICE_NAME);
#else
unregister_blkdev(MAJOR_NR,DEVICE_NAME);
#endif
blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
blksize_size[MAJOR_NR] = NULL;
blk_size[MAJOR_NR] = NULL;
}
module_init(init_mtdblock);
module_exit(cleanup_mtdblock);
From the source code above we can see that a block device is registered and released in much the same way as a character device is with the register_chrdev and unregister_chrdev functions:
int register_blkdev(unsigned int major, const char *name, struct block_device_operations *bdops);
int unregister_blkdev(unsigned int major, const char *name);
However, whereas register_chrdev takes a pointer to a file_operations structure, register_blkdev takes a pointer to a block_device_operations structure. The open, release, and ioctl methods defined there behave the same as the corresponding character device methods, but no read or write operation is defined. This is because all I/O involving a block device is normally buffered by the system.
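Condensed from the mtdblock code above, the registration pattern looks roughly like the following sketch; the mydev_* names and MYDEV_MAJOR are illustrative assumptions, not part of the original driver:
#define MYDEV_MAJOR 240   /* hypothetical, locally administered major number */

static int mydev_open(struct inode *inode, struct file *file);
static int mydev_release(struct inode *inode, struct file *file);
static int mydev_ioctl(struct inode *inode, struct file *file,
                       unsigned int cmd, unsigned long arg);

static struct block_device_operations mydev_fops = {
    owner:   THIS_MODULE,
    open:    mydev_open,     /* same roles as in a character driver */
    release: mydev_release,
    ioctl:   mydev_ioctl,
    /* no read/write: block I/O goes through the buffered request path */
};

static int __init mydev_init(void)
{
    if (register_blkdev(MYDEV_MAJOR, "mydev", &mydev_fops))
        return -EAGAIN;      /* major number unavailable */
    return 0;
}

static void __exit mydev_exit(void)
{
    unregister_blkdev(MYDEV_MAJOR, "mydev");
}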
A block driver must ultimately provide the mechanism that performs the actual block I/O; in Linux, the method used for these I/O operations is called the "request" method. When a block device is registered, its request queue must be initialized. This is done with blk_init_queue, which sets up the queue and associates the driver's request function with it. In the module's cleanup phase, blk_cleanup_queue should be called.
The relevant code in this example is:
BLK_INIT_QUEUE(BLK_DEFAULT_QUEUE(MAJOR_NR), &mtdblock_request, &mtdblock_lock);
blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
Each device has a default request queue; when needed, it can be obtained with the BLK_DEFAULT_QUEUE(major) macro. This macro looks up the corresponding default queue in a global array of blk_dev_struct structures (the array is named blk_dev). The blk_dev array is maintained by the kernel and can be indexed by major device number. The blk_dev_struct interface is defined as follows:
struct blk_dev_struct {
/*
* queue_proc has to be atomic
*/
request_queue_t request_queue;
queue_proc *queue;
void *data;
};
The request_queue member contains the initialized I/O request queue, and the data member may be used by the driver to hold private data.
request_queue is defined as:
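For example, a driver could reach its default queue and attach private state roughly as in this minimal sketch, assuming a 2.4-era kernel; MYDEV_MAJOR, mydev_request, and the private pointer are illustrative:
static void mydev_request(request_queue_t *q);   /* our request function */

static void mydev_setup_queue(void *private)
{
    /* BLK_DEFAULT_QUEUE indexes the kernel's blk_dev[] array by major number */
    request_queue_t *q = BLK_DEFAULT_QUEUE(MYDEV_MAJOR);

    blk_init_queue(q, &mydev_request);   /* associate our request function */
    blk_dev[MYDEV_MAJOR].data = private; /* driver-private data for later use */
}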
struct request_queue
{
/*
* the queue request freelist, one for reads and one for writes
*/
struct request_list rq[2];
/*
* Together with queue_head for cacheline sharing
*/
struct list_head queue_head;
elevator_t elevator;
request_fn_proc * request_fn;
merge_request_fn * back_merge_fn;
merge_request_fn * front_merge_fn;
merge_requests_fn * merge_requests_fn;
make_request_fn * make_request_fn;
plug_device_fn * plug_device_fn;
/*
* The queue owner gets to use this for whatever they like.
* ll_rw_blk doesn't touch it.
*/
void * queuedata;
/*
* This is used to remove the plug when tq_disk runs.
*/
struct tq_struct plug_tq;
/*
* Boolean that indicates whether this queue is plugged or not.
*/
char plugged;
/*
* Boolean that indicates whether current_request is active or
* not.
*/
char head_active;
/*
* Is meant to protect the queue in the future instead of
* io_request_lock
*/
spinlock_t queue_lock;
/*
* Tasks wait here for a free request
*/
wait_queue_head_t wait_for_request;
};
The figure below depicts the relationship between blk_dev, blk_dev_struct, and request_queue:
The next figure depicts the registration and release process of a block device:
5. Summary