qemu thread pool: a usage example for semaphores

qemu has a thread pool that serves aio:

struct ThreadPool {
    AioContext *ctx;
    QEMUBH *completion_bh;
    QemuMutex lock;
    QemuCond check_cancel;
    QemuCond worker_stopped;
    QemuSemaphore sem;
    int max_threads;
    QEMUBH *new_thread_bh;

    /* The following variables are only accessed from one AioContext. */
    QLIST_HEAD(, ThreadPoolElement) head;

    /* The following variables are protected by lock.  */
    QTAILQ_HEAD(, ThreadPoolElement) request_list;
    int cur_threads;
    int idle_threads;
    int new_threads;     /* backlog of threads we need to create */
    int pending_threads; /* threads created but not running yet */
    int pending_cancellations; /* whether we need a cond_broadcast */
    bool stopping;
};

When qemu builds the pool in thread_pool_init_one, it registers a deferred work item (a bottom half):
pool->new_thread_bh = aio_bh_new(ctx, spawn_thread_bh_fn, pool);

This bottom half gets scheduled from inside spawn_thread:

static void spawn_thread(ThreadPool *pool)
{
    pool->cur_threads++;
    pool->new_threads++; // record how many times spawn_thread has been called: this feeds the thread-creating bottom half scheduled below, since not every call in here schedules the BH
    /* If there are threads being created, they will spawn new workers, so
     * we don't spend time creating many threads in a loop holding a mutex or
     * starving the current vcpu.
     *
     * If there are no idle threads, ask the main thread to create one, so we
     * inherit the correct affinity instead of the vcpu affinity.
     */
    if (!pool->pending_threads) { // if creation is already scheduled, don't schedule it again
        qemu_bh_schedule(pool->new_thread_bh);
    }
}

When the bottom half runs, it executes spawn_thread_bh_fn:

static void spawn_thread_bh_fn(void *opaque)
{
    ThreadPool *pool = opaque;

    qemu_mutex_lock(&pool->lock);
    do_spawn_thread(pool);
    qemu_mutex_unlock(&pool->lock);
}
static void do_spawn_thread(ThreadPool *pool)
{
    QemuThread t;

    /* Runs with lock taken.  */
    if (!pool->new_threads) {
        return;
    }

    pool->new_threads--;
    pool->pending_threads++; // prevents spawn_thread from scheduling the BH again

    qemu_thread_create(&t, "worker", worker_thread, pool, QEMU_THREAD_DETACHED);
}


Multiple workers are created one at a time in a chained, quasi-recursive fashion: each newly created worker thread calls do_spawn_thread again to create the next worker, until it finds new_threads is 0.

There is a problem here: if qemu_thread_create fails to create the new thread, no new thread can ever be created afterwards, because pending_threads will never be decremented back to 0, so spawn_thread above will never schedule the bottom half again.
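
The chaining is visible at the top of worker_thread (trimmed from QEMU's thread-pool.c of the same era; treat it as a sketch): the new worker first returns its pending_threads ticket, then creates the next worker, and both steps never run if qemu_thread_create failed to start it.

static void *worker_thread(void *opaque)
{
    ThreadPool *pool = opaque;

    qemu_mutex_lock(&pool->lock);
    pool->pending_threads--; /* the decrement the paragraph above worries about */
    do_spawn_thread(pool);   /* chain: create the next queued worker, if any */
    ...
}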

Below is thread_pool_submit_aio, the function that dispatches aio work. As it shows, the number of threads in the pool grows on demand: no new thread is created when an idle thread exists or when the thread count has hit the limit. A semaphore is used for notification to cut polling overhead: a slot is released only when work arrives, instead of every worker repeatedly grabbing the mutex below just to discover that the request list is empty.

    qemu_mutex_lock(&pool->lock);
    if (pool->idle_threads == 0 && pool->cur_threads < pool->max_threads) {
        spawn_thread(pool);
    }
    QTAILQ_INSERT_TAIL(&pool->request_list, req, reqs);
    qemu_mutex_unlock(&pool->lock);
    qemu_sem_post(&pool->sem); // bump the sem count so one idle worker gets to run
    return &req->common;


When a worker is created, and again after it finishes running a func, it tries to take pool->sem (waiting for sem > 0), i.e. it waits for new work:

pool->idle_threads++;
qemu_mutex_unlock(&pool->lock);
ret = qemu_sem_timedwait(&pool->sem, 10000); // decrement the sem count
qemu_mutex_lock(&pool->lock);
pool->idle_threads--;
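
For context, here is roughly the loop that excerpt lives in (reconstructed from QEMU's thread-pool.c of that period, trimmed; a sketch, not the exact source): the semaphore gates the loop, the mutex only covers the short list manipulation, and a worker that sees no work for 10 seconds simply exits, which is how the pool shrinks.

while (!pool->stopping) {
    ThreadPoolElement *req;
    int ret;

    do {
        pool->idle_threads++;
        qemu_mutex_unlock(&pool->lock);
        ret = qemu_sem_timedwait(&pool->sem, 10000); /* wait for work, 10 s max */
        qemu_mutex_lock(&pool->lock);
        pool->idle_threads--;
    } while (ret == -1 && !QTAILQ_EMPTY(&pool->request_list));
    if (ret == -1 || pool->stopping) {
        break; /* idle timeout (or teardown): let this worker die */
    }

    req = QTAILQ_FIRST(&pool->request_list);
    QTAILQ_REMOVE(&pool->request_list, req, reqs);
    req->state = THREAD_ACTIVE;
    qemu_mutex_unlock(&pool->lock);

    ret = req->func(req->arg); /* run the submitted work outside the lock */

    req->ret = ret;
    /* Write ret before state. */
    smp_wmb();
    req->state = THREAD_DONE;

    qemu_mutex_lock(&pool->lock);
    qemu_bh_schedule(pool->completion_bh);
}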

When the pool is torn down, it sets pool->stopping and hands every worker one final task (via sem_post): terminate yourself.

thread_pool_free:

/* Stop new threads from spawning */
qemu_bh_delete(pool->new_thread_bh);
pool->cur_threads -= pool->new_threads;
pool->new_threads = 0;

/* Wait for worker threads to terminate */
pool->stopping = true;
while (pool->cur_threads > 0) {
    qemu_sem_post(&pool->sem);
    qemu_cond_wait(&pool->worker_stopped, &pool->lock);
}

KVM shared MSRs

Some MSRs are only ever accessed from user mode and never touched in kernel mode, so on vmexit we don't have to switch them back to their host values, and the VMCS doesn't save/restore them either. Only when the vcpu thread needs to return to userspace are these MSR values saved into shared_msrs and the registers switched back to the host values saved earlier. Avi Kivity's patch explains the idea behind sparing such registers from kernel-side switching: https://lore.kernel.org/patchwork/cover/170941/

The VMCS itself only switches a small set of MSRs, so most MSR switching has traditionally been done in software, which is expensive; the patch above optimizes exactly that.

But is it guaranteed that these registers are never accessed in kernel mode? The KVM module can promise not to touch them itself, but with kernel preemption enabled, how do we know other kernel code won't? AMD's svm code is refreshingly honest about this worry, which confirms it is a real pitfall that developers need to keep in mind:
svm_vcpu_load:

    /* This assumes that the kernel never uses MSR_TSC_AUX */
    if (static_cpu_has(X86_FEATURE_RDTSCP))
        wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
The host values in shared_msrs are the ones saved when vcpu_enter_guest calls kvm_x86_ops->prepare_guest_switch(vcpu).
The switch on return to userspace is done by kvm_on_user_return. That function is wired up by kvm_set_shared_msr, which is itself called during kvm_x86_ops->prepare_guest_switch(vcpu) and in turn calls the kernel function user_return_notifier_register(&smsr->urn). This is the kernel's return-to-userspace notifier-chain interface; kvm uses it to hang a hook on the vcpu's return path, which is how the shared_msrs are switched back to the previously saved host values when the vcpu ioctl returns to qemu.
So kvm_on_user_return is the logic registered with the kernel to run when the vcpu thread returns to user mode. While switching the shared_msrs it also unregisters itself from the notifier chain: other processes should not run it on their own returns, and it will be registered again anyway on the next guest entry.
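
The lazy registration lives in kvm_set_shared_msr, roughly like this (simplified from the kernel source of the same era; a sketch): the MSR is only written when the value actually changes under the mask, and the user-return notifier is hooked the first time any shared MSR diverges from its host value on this cpu.

void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
{
    unsigned int cpu = smp_processor_id();
    struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);

    if (((value ^ smsr->values[slot].curr) & mask) == 0)
        return;                          /* nothing changed: skip the wrmsr */
    smsr->values[slot].curr = value;
    wrmsrl(shared_msrs_global.msrs[slot], value);
    if (!smsr->registered) {
        smsr->urn.on_user_return = kvm_on_user_return;
        user_return_notifier_register(&smsr->urn);  /* hook the return path */
        smsr->registered = true;
    }
}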

MSR switch from HOST to GUEST

This happens at vmentry; the call chain is:
vcpu_enter_guest
    kvm_x86_ops->prepare_guest_switch(vcpu) ==> vmx_save_host_state
        kvm_set_shared_msr(vmx->guest_msrs[i].index, vmx->guest_msrs[i].data, vmx->guest_msrs[i].mask)
On the host-to-guest switch, the guest MSR values come from vmx->guest_msrs[i].data.
vmx->guest_msrs is read by vmx_get_msr and updated by vmx_set_msr; vmx_get/set_msr are typically triggered by qemu via handle_rdmsr/wrmsr, and via saving/loading the MSRs around live migration. vmx_set_msr also calls kvm_set_shared_msr, but largely as a trial run to see whether the value can actually be written: if it can't, the corresponding entry in vmx->guest_msrs must keep its old contents rather than be updated, since the new value may be illegal. Strictly speaking, vmx_set_msr would only need to update guest_msrs, because at vmentry the value is read from guest_msrs and passed to kvm_set_shared_msr again anyway.
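
The try-then-roll-back logic in vmx_set_msr looks roughly like this (trimmed from the kernel of that era; a sketch, not the full function):

    default:
        msr = find_msr_entry(vmx, msr_index);
        if (msr) {
            u64 old_msr_data = msr->data;
            msr->data = data;
            if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
                preempt_disable();
                ret = kvm_set_shared_msr(msr->index, msr->data, msr->mask);
                preempt_enable();
                if (ret)
                    msr->data = old_msr_data; /* wrmsr failed: keep the old value */
            }
            break;
        }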

MSR switch from GUEST to HOST

vmexit itself does not switch the MSRs covered by shared_msrs; that happens when the vcpu thread is about to return to user mode, when the kernel fires the notifier registered earlier. The call chain is:
prepare_exit_to_usermode
    exit_to_usermode_loop
        fire_user_return_notifiers
            urn->on_user_return(urn) ==> kvm_on_user_return: find the matching shared_msrs and restore the host values saved before vmentry
static void kvm_on_user_return(struct user_return_notifier *urn)
{
    unsigned slot;
    struct kvm_shared_msrs *locals
        = container_of(urn, struct kvm_shared_msrs, urn);
    struct kvm_shared_msr_values *values;
    unsigned long flags;

    /*
     * Disabling irqs at this point since the following code could be
     * interrupted and executed through kvm_arch_hardware_disable()
     */
    local_irq_save(flags);
    if (locals->registered) {
        locals->registered = false;
        user_return_notifier_unregister(urn);
    }
    local_irq_restore(flags);
    for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
        values = &locals->values[slot];
        if (values->host != values->curr) {
            wrmsrl(shared_msrs_global.msrs[slot], values->host);
            values->curr = values->host;
        }
    }
}


vfio passthrough device memory region initialization

vfio uses a dedicated structure to manage a device's memory regions:
typedef struct VFIORegion {
    struct VFIODevice *vbasedev;
    off_t fd_offset; /* offset of region within device fd */
    MemoryRegion *mem; /* slow, read/write access */
    size_t size;
    uint32_t flags; /* VFIO region flags (rd/wr/mmap) */
    uint32_t nr_mmaps;
    VFIOMmap *mmaps;
    uint8_t nr; /* cache the region number for debug */
} VFIORegion;


Note that it also has a field pointing to VFIOMmap structures:
typedef struct VFIOMmap {
    MemoryRegion mem;
    void *mmap;
    off_t offset;
    size_t size;
} VFIOMmap;


Looks redundant, doesn't it? Does mem in the outer structure point at mem in the inner one?
The outer mem also carries an odd comment: slow. Is there a faster kind of MR? Does that mean this one is the I/O (read/write) MR, and there is a RAM one as well?
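
Peeking ahead at the answer, here is a trimmed sketch of QEMU's vfio_region_mmap (exact helper names vary across QEMU versions): the mmap-able chunks of a BAR each get a fast RAM-style MR layered as a subregion on top of the slow read/write MR, which is what the slow comment is hinting at.

int vfio_region_mmap(VFIORegion *region)
{
    int i, prot = 0;

    prot |= region->flags & VFIO_REGION_INFO_FLAG_READ ? PROT_READ : 0;
    prot |= region->flags & VFIO_REGION_INFO_FLAG_WRITE ? PROT_WRITE : 0;

    for (i = 0; i < region->nr_mmaps; i++) {
        /* map the BAR chunk from the device fd ... */
        region->mmaps[i].mmap = mmap(NULL, region->mmaps[i].size, prot,
                                     MAP_SHARED, region->vbasedev->fd,
                                     region->fd_offset + region->mmaps[i].offset);
        /* ... wrap it in a RAM MR and overlay it on the slow region->mem */
        memory_region_init_ram_device_ptr(&region->mmaps[i].mem,
                                          memory_region_owner(region->mem),
                                          "mmap", region->mmaps[i].size,
                                          region->mmaps[i].mmap);
        memory_region_add_subregion(region->mem, region->mmaps[i].offset,
                                    &region->mmaps[i].mem);
    }
    return 0;
}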


IOMMU group and ACS cap

When passing a device through to a VM with VFIO, every device in the passthrough device's iommu group has to be unbound first. Why is that? And what is an iommu group anyway? Friends who have never hit this problem: you are clearly young and rich :) because all your devices have ACS. And what is that? Let's start with the official documentation:


libvirt passing file descriptors to qemu

The qemu process created by libvirt gets some fd arguments; these are handles to device files etc. that libvirt opened on qemu's behalf, for example:
qemu … -netdev tap,fd=24,id=hostnet1,vhost=on,vhostfd=25
This is done because the backend needs libvirt to configure it first, and for security reasons. But qemu is a separate process: can it use a file descriptor just by being handed its number? Obviously it's not that simple; let's analyze it from the code.


sriov vf get iommu group kernel code trace

The VF device driver call chain:
pci_enable_sriov -> sriov_enable -> pci_iov_add_virtfn -> pci_device_add -> device_add ->
    blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
                                             BUS_NOTIFY_ADD_DEVICE, dev);
The PCI bus has registered an IOMMU notifier that fires when device_add sends its notification:
static int __init pci_iommu_init(void)
{
        if (iommu_detected)
                intel_iommu_init();

        return 0;
}

/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);

intel_iommu_init -> iommu_bus_init -> “nb->notifier_call = iommu_bus_notifier;”

        if (action == BUS_NOTIFY_ADD_DEVICE) {
                if (ops->add_device)
                        return ops->add_device(dev);
ops->add_device:
static struct iommu_ops intel_iommu_ops = {
        .capable        = intel_iommu_capable,
        .domain_alloc   = intel_iommu_domain_alloc,
        .domain_free    = intel_iommu_domain_free,
        .attach_dev     = intel_iommu_attach_device,
        .detach_dev     = intel_iommu_detach_device,
        .map            = intel_iommu_map,
        .unmap          = intel_iommu_unmap,
        .map_sg         = default_iommu_map_sg,
        .iova_to_phys   = intel_iommu_iova_to_phys,
        .add_device     = intel_iommu_add_device,
        .remove_device  = intel_iommu_remove_device,
        .pgsize_bitmap  = INTEL_IOMMU_PGSIZES,
};

intel_iommu_add_device:

static int intel_iommu_add_device(struct device *dev)
{
        struct intel_iommu *iommu;
        struct iommu_group *group;
        u8 bus, devfn;

        iommu = device_to_iommu(dev, &bus, &devfn);
        if (!iommu)
                return -ENODEV;

        iommu_device_link(iommu->iommu_dev, dev);

        group = iommu_group_get_for_dev(dev);

        if (IS_ERR(group))
                return PTR_ERR(group);

        iommu_group_put(group);
        return 0;
}
iommu_group_get_for_dev:
this finds or creates an iommu group for the VF device
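
This is also where the ACS story from the post above comes in. The grouping logic inside iommu_group_get_for_dev walks upstream roughly like this (later kernels factor it out as pci_device_group; a trimmed sketch, details vary by version): every device below the last bridge lacking a full ACS path ends up sharing one group.

struct iommu_group *pci_device_group(struct device *dev)
{
    struct pci_dev *pdev = to_pci_dev(dev);
    struct pci_bus *bus;
    struct iommu_group *group;

    /* Walk up: if any upstream bridge lacks ACS isolation, alias to it */
    for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
        if (!bus->self)
            continue;
        if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
            break;
        pdev = bus->self; /* group with the non-isolating bridge instead */
    }

    /* Reuse the group of the device we aliased to, if it already has one */
    group = iommu_group_get(&pdev->dev);
    if (group)
        return group;

    return iommu_group_alloc(); /* otherwise this device starts a new group */
}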

The Linux kernel's preemption notifier mechanism at schedule time

When the kernel switches processes, it first calls __schedule, which disables preemption and then calls context_switch:
static void __sched __schedule(void)
{
    struct task_struct *prev, *next;
    unsigned long *switch_count;
    struct rq *rq;
    int cpu;

need_resched:
    preempt_disable();
    cpu = smp_processor_id();
    rq = cpu_rq(cpu);
    ...
    raw_spin_lock_irq(&rq->lock);
    ...
    context_switch(rq, prev, next);
    ...
    sched_preempt_enable_no_resched();
}

static inline void
context_switch(struct rq *rq, struct task_struct *prev,
           struct task_struct *next)
{
    struct mm_struct *mm, *oldmm;

    prepare_task_switch(rq, prev, next);

    mm = next->mm;
    oldmm = prev->active_mm;
…

    finish_task_switch(this_rq(), prev);
prepare_task_switch calls fire_sched_out_preempt_notifiers, which in turn invokes the sched_out hooks registered by prev:
static inline void
prepare_task_switch(struct rq *rq, struct task_struct *prev,
            struct task_struct *next)
{
    trace_sched_switch(prev, next);
    sched_info_switch(prev, next);
    perf_event_task_sched_out(prev, next);
    fire_sched_out_preempt_notifiers(prev, next);
    prepare_lock_switch(rq, next);
    prepare_arch_switch(next);
}
A side branch here: when a freshly created process is scheduled for the first time, schedule_tail is called instead:
/**
* schedule_tail - first thing a freshly forked thread must call.
* @prev: the thread we just switched away from.
*/
asmlinkage void schedule_tail(struct task_struct *prev)
    __releases(rq->lock)
{
    struct rq *rq = this_rq();

    finish_task_switch(rq, prev);

    /*
     * FIXME: do we need to worry about rq being invalidated by the
     * task_switch?
     */
    post_schedule(rq);

#ifdef __ARCH_WANT_UNLOCKED_CTXSW
    /* In this case, finish_task_switch does not reenable preemption */
    preempt_enable();
#endif
    if (current->set_child_tid)
        put_user(task_pid_vnr(current), current->set_child_tid);
}
Back to the main path: finish_task_switch calls fire_sched_in_preempt_notifiers:
static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
{
    struct preempt_notifier *notifier;
 
    hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
        notifier->ops->sched_in(notifier, raw_smp_processor_id());
}
Where do preempt_notifiers get registered? For kvm it is here:
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
{
    int r;
    struct kvm_vcpu *vcpu, *v;
 
    if (id >= KVM_MAX_VCPUS)
        return -EINVAL;
 
    vcpu = kvm_arch_vcpu_create(kvm, id);
    if (IS_ERR(vcpu))
        return PTR_ERR(vcpu);
 
    preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
 
    r = kvm_arch_vcpu_setup(vcpu);

kvm_preempt_ops itself is initialized when the kvm module is loaded, in vmx_init->kvm_init:

    kvm_preempt_ops.sched_in = kvm_sched_in;
    kvm_preempt_ops.sched_out = kvm_sched_out;
So kvm_sched_in and kvm_sched_out are called in a context where preemption is disabled.
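
For reference, the two hooks themselves are tiny (trimmed from virt/kvm/kvm_main.c of that era): the notifier is registered in vcpu_load and unregistered in vcpu_put, so they fire whenever the vcpu thread is switched out or back in while inside a vcpu ioctl.

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
    struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

    kvm_arch_vcpu_load(vcpu, cpu); /* reload guest state on the new cpu */
}

static void kvm_sched_out(struct preempt_notifier *pn,
                          struct task_struct *next)
{
    struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

    kvm_arch_vcpu_put(vcpu); /* save guest state before being switched out */
}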

[MOS] deadlock detection

MOS: Modern Operating Systems

deadlock detection with one resource of each type

If every resource exists as a single instance, a graph like the one above can be used to analyze resource contention.
Squares stand for resources and circles for processes; an arrow pointing into a process means the process owns that resource, while an arrow leaving a process points to a resource it is waiting for.
If the directed graph contains a cycle, there is a deadlock.

There are many ways to find a cycle; here is a fairly intuitive one (a C sketch follows the list):
0) Mark all arrows in the graph as unmarked, initialize an empty list L, and pick any node (process or resource; we only care about the abstract directed graph) as the current node.
1) Check whether the current node already appears in L. If not, append it to the tail of L and continue; if it does, we have found a cycle, i.e. a deadlock.
2) If the current node has an outgoing unmarked arrow, follow it to the next node and mark that arrow as marked, then go to 1); if it has no unmarked arrow, continue to the next step.
3) No unmarked arrows means a dead end: remove the current node, backtrack to the node we came from, make that the current node, and go to 2).
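
A minimal C sketch of those steps (my own illustration, not from the book): nodes are plain integers, the graph is an adjacency matrix, and L is kept as an explicit stack so that step 3's backtracking is just popping the tail.

#include <stdbool.h>

#define N 8                  /* total nodes: processes and resources alike */

static bool edge[N][N];      /* edge[u][v]: an arrow from node u to node v */
static bool marked[N][N];    /* step 0: all arrows start out unmarked */

/* Follow the steps above starting from 'start'; true means a cycle (deadlock). */
static bool find_cycle(int start)
{
    int L[N + 1];            /* the list L, kept as an explicit stack */
    int depth = 0;

    L[depth++] = start;      /* step 1 trivially passes for the first node */

    while (depth > 0) {
        int cur = L[depth - 1];
        int next = -1;

        /* step 2: follow an unmarked outgoing arrow, if any */
        for (int v = 0; v < N; v++) {
            if (edge[cur][v] && !marked[cur][v]) {
                marked[cur][v] = true;
                next = v;
                break;
            }
        }

        if (next == -1) {    /* step 3: dead end, drop cur and backtrack */
            depth--;
            continue;
        }

        /* step 1: a node that is already in L closes a cycle */
        for (int i = 0; i < depth; i++) {
            if (L[i] == next)
                return true;
        }
        L[depth++] = next;
    }
    return false;            /* no cycle reachable from start */
}

Calling find_cycle(n) for every node n covers graphs that are not fully connected.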

deadlock detection with multiple resources of each type

When a resource type has multiple instances, a matrix-based algorithm is used (a C sketch follows the list).
Vector E records how many instances of each of the m resource types are currently in use, and A records how many of each remain available.
Matrix C records which resources each of the n processes currently holds: one row per process, one column per resource type; summing column j over all processes gives E_j.
Matrix R records how many instances of each resource type each of the n processes is still requesting; rows and columns mean the same as above.
Define X <= Y if and only if every component of X is <= the corresponding component of Y.
Algorithm:
0) Mark all processes as unmarked.
1) Find an unmarked process i whose outstanding request (row R_i) satisfies R_i <= A, and move on to the next step; if no process qualifies, jump to 3).
2) Set A = A + C_i (process i can run to completion and return everything it holds), mark process i, and jump to 1).
3) If all processes are marked, they can all run to completion and there is no deadlock; otherwise the remaining unmarked processes form a deadlock.
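
And a minimal C sketch of the matrix algorithm (again my own illustration; the dimensions are made up):

#include <stdbool.h>

#define NPROC 3   /* n processes */
#define NRES  4   /* m resource types */

/* Returns true and fills deadlocked[] if the unmarked processes deadlock.
 * A: available vector, C: current allocation, R: outstanding requests. */
static bool detect_deadlock(int A[NRES],
                            const int C[NPROC][NRES],
                            const int R[NPROC][NRES],
                            bool deadlocked[NPROC])
{
    bool marked[NPROC] = { false };       /* step 0 */
    bool progress = true;

    while (progress) {
        progress = false;
        for (int i = 0; i < NPROC; i++) { /* step 1: find i with Ri <= A */
            if (marked[i])
                continue;
            bool fits = true;
            for (int j = 0; j < NRES; j++) {
                if (R[i][j] > A[j]) {
                    fits = false;
                    break;
                }
            }
            if (fits) {                   /* step 2: run i, reclaim Ci */
                for (int j = 0; j < NRES; j++)
                    A[j] += C[i][j];
                marked[i] = true;
                progress = true;
            }
        }
    }

    bool deadlock = false;                /* step 3 */
    for (int i = 0; i < NPROC; i++) {
        deadlocked[i] = !marked[i];
        deadlock |= deadlocked[i];
    }
    return deadlock;
}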

How a virtual machine chooses the x2apic destination mode

First, a quick introduction to the apic destination modes:

In physical mode, the upper 32 bits (the destination field) hold the apic id.
In logical mode, the upper 32 bits hold the MDA, which comes in two flavors, flat and cluster:
flat is simply a bitmap;
cluster has two parts: the upper 16 bits are the cluster ID and the lower 16 bits a bitmap. Cluster addressing further splits into flat and hierarchical variants, which we won't dig into here.

Per the Intel spec: "Flat logical mode is not supported in the x2APIC mode. Hence the Destination Format Register (DFR) is eliminated in x2APIC mode." With the DFR gone, x2apic no longer supports flat mode under logical mode; only cluster mode remains, because x2apic widens the original apic's 8-bit apic id to 32 bits (a flat bitmap cannot cover that many cpus).
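
For intuition, this is how the SDM derives the cluster-style logical ID from the 32-bit x2APIC ID (a small illustrative helper, not kernel code):

#include <stdint.h>

/* In x2APIC mode the 32-bit logical ID is derived from the x2APIC ID:
 * upper 16 bits = cluster ID, lower 16 bits = 1-of-16 bitmap. */
static uint32_t x2apic_logical_id(uint32_t x2apic_id)
{
    uint32_t cluster = x2apic_id >> 4;  /* 16 CPUs per cluster */
    uint32_t bit     = x2apic_id & 0xf;
    return (cluster << 16) | (1u << bit);
}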

That is a problem for devices that deliver interrupts through the ioapic or through MSI (as opposed to MSI-X): they need IR, interrupt remapping (a VT-d capability, implemented by the IOMMU).
There are exceptions, though: with physical mode and fewer than 256 cpus, an 8-bit apic id already covers every cpu, and those 8 bits can be written directly into the x2apic's 32-bit destination field, so in that case x2apic physical mode can be used without any IR capability.

The thread at https://patchwork.kernel.org/patch/2826342/ tells us that using x2apic without IR only happens in virtualization scenarios (the case of a BIOS enabling x2apic without enabling VT-d appears to be on its way to being forbidden: enabling x2apic is supposed to require VT-d).

But why must x2apic be enabled in virtualization scenarios in the first place?
IBM gives an answer: Red Hat Enterprise Linux 6 implements x2APIC emulation for KVM guests. Advanced programmable interrupt controllers (APICs) are used in symmetric multiprocessor (SMP) computer systems. x2APIC is a machine state register (MSR) interface to a local APIC with performance and scalability enhancements. IBM lab tests show that enabling the x2APIC support for Red Hat Enterprise Linux 6 guests can result in 2% to 5% throughput improvement for many I/O workloads. You can enable the x2APIC support by specifying the -cpu qemu64,+x2apic option on the qemu-kvm command for a KVM guest.

Of course that explanation only scratches the itch through the boot; for the real answer, show me the patch:

commit ce69a784504222c3ab6f1b3c357d09ec5772127a
Author: Gleb Natapov <gleb@redhat.com>
Date:   Mon Jul 20 15:24:17 2009 +0300

    x86/apic: Enable x2APIC without interrupt remapping under KVM

    KVM would like to provide x2APIC interface to a guest without emulating
    interrupt remapping device. The reason KVM prefers guest to use x2APIC
    is that x2APIC interface is better virtualizable and provides better
    performance than mmio xAPIC interface:

     - msr exits are faster than mmio (no page table walk, emulation)
     - no need to read back ICR to look at the busy bit
     - one 64 bit ICR write instead of two 32 bit writes
     - shared code with the Hyper-V paravirt interface

    Included patch changes x2APIC enabling logic to enable it even if IR
    initialization failed, but kernel runs under KVM and no apic id is
    greater than 255 (if there is one spec requires BIOS to move to x2apic
    mode before starting an OS).

    -v2: fix build
    -v3: fix bug causing compiler warning

    Signed-off-by: Gleb Natapov <gleb@redhat.com>
    Acked-by: Suresh Siddha <suresh.b.siddha@intel.com>
    Cc: Sheng Yang <sheng@linux.intel.com>
    Cc: "avi@redhat.com" <avi@redhat.com>
    LKML-Reference: <20090720122417.GR5638@redhat.com>
    Signed-off-by: Ingo Molnar <mingo@elte.hu>


The PTRACE permission checks: freedom has its preconditions~

Everything about the PTRACE syscall starts here:
COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
               compat_long_t, addr, compat_long_t, data)

First, there is an express lane for attach:
if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
        ret = ptrace_attach(child, request, addr, data);
What attach does is make the current process the parent of the traced process, and add the tracee to the current process's ptraced list:
__ptrace_link(task, current);
    list_add(&child->ptrace_entry, &new_parent->ptraced);
    child->parent = new_parent;
But doing this has preconditions; see the analysis below, we'll set it aside for now.

attach? wtf? Why attach before use, and why all the mystery; can't we just use it directly?
No, because you cannot be allowed to poke at another process's address space directly; otherwise address-space isolation would be a joke.
So the ptrace syscall entry performs a very important check:
ret = ptrace_check_attach(child, request == PTRACE_KILL ||
                  request == PTRACE_INTERRUPT);
    if (!ret) { // only when 0 is returned, i.e. the check passes, does the ptrace request proceed
        ret = compat_arch_ptrace(child, request, addr, data);
        if (ret || request != PTRACE_DETACH)
            ptrace_unfreeze_traced(child);
    }
The condition checked is: child->ptrace && child->parent == current
Only after passing this gate may the later operations run (except kill and interrupt, whose own handling either contains similar checks or doesn't need them). So can anyone just attach first, at the cost of a small detour? Obviously not. We teased the preconditions earlier; now let's look at the permission checks inside attach. Only the privileged may do as they please; ordinary folks have to obey the law!!

The waters of permission management run deep and I haven't fully digested them, so here is just the part I can make sense of:
retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
Two ways to pass: 1) my own user poking at my own processes is ok (for what suid, euid etc. mean, see the links at the end); 2) being privileged in the target's user namespace is ok (as I understand it, big bosses like root).
    tcred = __task_cred(task);
    if (uid_eq(cred->uid, tcred->euid) &&
        uid_eq(cred->uid, tcred->suid) &&
        uid_eq(cred->uid, tcred->uid)  &&
        gid_eq(cred->gid, tcred->egid) &&
        gid_eq(cred->gid, tcred->sgid) &&
        gid_eq(cred->gid, tcred->gid))
        goto ok;
    if (ptrace_has_cap(tcred->user_ns, mode))
        goto ok;
On top of that there is a namespace capability check, testing whether the caller has the ptrace capability in the target process's user namespace:
ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE)
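
To watch these checks fire from userspace, here is a minimal tracer (illustration only; error handling trimmed): attaching to a process of another uid without CAP_SYS_PTRACE in its user namespace makes PTRACE_ATTACH fail with EPERM, which is __ptrace_may_access saying no (many distros additionally gate this with Yama's ptrace_scope).

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

int main(int argc, char **argv)
{
    if (argc < 3)
        return 1;
    pid_t pid = (pid_t)atoi(argv[1]);       /* target pid */
    long addr = strtol(argv[2], NULL, 0);   /* address to peek in the target */

    if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1) {
        perror("PTRACE_ATTACH");            /* EPERM: __ptrace_may_access said no */
        return 1;
    }
    waitpid(pid, NULL, 0);                  /* wait until the tracee stops */

    errno = 0;
    long word = ptrace(PTRACE_PEEKDATA, pid, (void *)addr, NULL);
    if (errno == 0)
        printf("word at %#lx: %#lx\n", addr, word);

    ptrace(PTRACE_DETACH, pid, NULL, NULL); /* also reverses __ptrace_link */
    return 0;
}
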

参考链接