Subject: spufs: block in spu_acquire_runnable
From: Arnd Bergmann

This patch disables the logic that under the covers faults in spu
contexts from the page fault handler. For the upstream version, we
want something similar, but this is a relatively straightforward
patch to make the review easier.

Signed-off-by: Arnd Bergmann

Index: linux-2.6/arch/powerpc/platforms/cell/spufs/context.c
===================================================================
--- linux-2.6.orig/arch/powerpc/platforms/cell/spufs/context.c
+++ linux-2.6/arch/powerpc/platforms/cell/spufs/context.c
@@ -52,6 +52,7 @@ struct spu_context *alloc_spu_context(st
 	init_waitqueue_head(&ctx->wbox_wq);
 	init_waitqueue_head(&ctx->stop_wq);
 	init_waitqueue_head(&ctx->mfc_wq);
+	init_waitqueue_head(&ctx->run_wq);
 	ctx->state = SPU_STATE_SAVED;
 	ctx->ops = &spu_backing_ops;
 	ctx->owner = get_task_mm(current);
Index: linux-2.6/arch/powerpc/platforms/cell/spufs/file.c
===================================================================
--- linux-2.6.orig/arch/powerpc/platforms/cell/spufs/file.c
+++ linux-2.6/arch/powerpc/platforms/cell/spufs/file.c
@@ -245,7 +245,7 @@ static unsigned long spufs_ps_nopfn(stru
 	/* error here usually means a signal.. we might want to test
 	 * the error code more precisely though
 	 */
-	ret = spu_acquire_runnable(ctx, 0);
+	ret = spu_acquire_runnable(ctx, 1);
 	if (ret)
 		return NOPFN_REFAULT;
 
@@ -1500,7 +1500,7 @@ static ssize_t spufs_mfc_write(struct fi
 	if (ret)
 		goto out;
 
-	ret = spu_acquire_runnable(ctx, 0);
+	ret = spu_acquire_runnable(ctx, 1);
 	if (ret)
 		goto out;
 
Index: linux-2.6/arch/powerpc/platforms/cell/spufs/sched.c
===================================================================
--- linux-2.6.orig/arch/powerpc/platforms/cell/spufs/sched.c
+++ linux-2.6/arch/powerpc/platforms/cell/spufs/sched.c
@@ -630,10 +630,12 @@ static struct spu *find_victim(struct sp
 	return NULL;
 }
 
+
 /**
  * spu_activate - find a free spu for a context and execute it
  * @ctx: spu context to schedule
- * @flags: flags (currently ignored)
+ * @flags: if nonzero, wait for context to become runnable,
+ *	instead of activating it from here.
  *
  * Tries to find a free spu to run @ctx. If no free spu is available
  * add the context to the runqueue so it gets woken up once an spu
@@ -641,6 +643,10 @@ static struct spu *find_victim(struct sp
  */
 int spu_activate(struct spu_context *ctx, unsigned long flags)
 {
+	if (flags)
+		return spufs_wait(ctx->run_wq,
+			ctx->state == SPU_STATE_RUNNABLE);
+
 	do {
 		struct spu *spu;
 
@@ -667,6 +673,7 @@ int spu_activate
 			spu_bind_context(spu, ctx);
 			cbe_spu_info[node].nr_active++;
 			mutex_unlock(&cbe_spu_info[node].list_mutex);
+			wake_up_all(&ctx->run_wq);
 			return 0;
 		}
 
Index: linux-2.6/arch/powerpc/platforms/cell/spufs/spufs.h
===================================================================
--- linux-2.6.orig/arch/powerpc/platforms/cell/spufs/spufs.h
+++ linux-2.6/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -71,6 +71,7 @@ struct spu_context {
 	wait_queue_head_t wbox_wq;
 	wait_queue_head_t stop_wq;
 	wait_queue_head_t mfc_wq;
+	wait_queue_head_t run_wq;
 	struct fasync_struct *ibox_fasync;
 	struct fasync_struct *wbox_fasync;
 	struct fasync_struct *mfc_fasync;
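
A note for reviewers, not part of the patch itself: the new early return in
spu_activate() relies on spufs_wait() to sleep until the run loop has
scheduled the context in. The sketch below paraphrases that helper; it is not
a verbatim copy of the macro in spufs.h. The two properties that matter here
are that it drops the context's state mutex around the actual sleep and that
it returns -ERESTARTSYS when a signal is pending, which is why both converted
call sites already have to handle a nonzero return value.

/*
 * Rough sketch of spufs_wait(wq, condition) -- paraphrased, not the
 * exact macro from spufs.h.  Like the real helper it expects a
 * variable named 'ctx' in the caller's scope, sleeps interruptibly
 * until the condition holds, releases the context lock while
 * sleeping and bails out with -ERESTARTSYS on a signal.
 */
#define spufs_wait(wq, condition)					\
({									\
	int __ret = 0;							\
	DEFINE_WAIT(__wait);						\
	for (;;) {							\
		prepare_to_wait(&(wq), &__wait, TASK_INTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		if (signal_pending(current)) {				\
			__ret = -ERESTARTSYS;				\
			break;						\
		}							\
		spu_release(ctx);	/* drop ctx state mutex */	\
		schedule();						\
		spu_acquire(ctx);	/* retake before rechecking */	\
	}								\
	finish_wait(&(wq), &__wait);					\
	__ret;								\
})

The matching wakeup is the wake_up_all(&ctx->run_wq) added above in the
flags == 0 activation path, so the page fault handler and the MFC writer only
make progress once the context has actually been bound to an SPU.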