2eb1b12049
For SCHED_RR tasks we can do some really trivial timeslicing. Basically we fire a timer on every scheduler tick that searches for a runnable thread of the same or higher priority, and if there is one we context switch to it. Because we can't lock spus from timer context we actually run this from a delayed workqueue instead of a timer.

A nice optimization would be to skip the actual priority bitmap search when there are fewer contexts than physical spus available. To implement this I need a so far unpublished patch from Andre, and it will be added after we have that patch in.

Note that right now we only do the timeslicing for SCHED_RR tasks. The code would work for SCHED_OTHER tasks as well, but their prio value is derived from the one the PPU thread has at the time of spu_run, and using it for spu scheduling decisions would make the code very unfair. SCHED_OTHER support will be enabled once the spu scheduler knows how to calculate spu_context.prio (very soon).

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
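For illustration, the delayed-work handler described above could look roughly like this. This is a minimal sketch only: the runqueue bitmap (spu_prio->bitmap) and the rearming helper (spu_start_tick()) live on the scheduler side and are assumed names, not part of the file below.

/*
 * Sketch of the per-context tick: yield the spu if a context of the
 * same or higher priority (lower prio value) is waiting to run.
 * spu_prio->bitmap and spu_start_tick() are assumed names here.
 */
void spu_sched_tick(struct work_struct *work)
{
	struct spu_context *ctx =
		container_of(work, struct spu_context, sched_work.work);
	int rearm = 1;

	mutex_lock(&ctx->state_mutex);
	if (ctx->spu) {
		int best = sched_find_first_bit(spu_prio->bitmap);

		/* a same- or higher-priority context is runnable: yield */
		if (best <= ctx->prio) {
			spu_deactivate(ctx);
			rearm = 0;
		}
	}
	mutex_unlock(&ctx->state_mutex);

	if (rearm)
		spu_start_tick(ctx);
}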
/*
 * SPU file system -- SPU context management
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include "spufs.h"

struct spu_context *alloc_spu_context(struct spu_gang *gang)
{
	struct spu_context *ctx;
	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
	if (!ctx)
		goto out;
	/* Binding to physical processor deferred
	 * until spu_activate().
	 */
	spu_init_csa(&ctx->csa);
	if (!ctx->csa.lscsa) {
		goto out_free;
	}
	spin_lock_init(&ctx->mmio_lock);
	kref_init(&ctx->kref);
	mutex_init(&ctx->state_mutex);
	init_MUTEX(&ctx->run_sema);
	init_waitqueue_head(&ctx->ibox_wq);
	init_waitqueue_head(&ctx->wbox_wq);
	init_waitqueue_head(&ctx->stop_wq);
	init_waitqueue_head(&ctx->mfc_wq);
	ctx->state = SPU_STATE_SAVED;
	ctx->ops = &spu_backing_ops;
	ctx->owner = get_task_mm(current);
	if (gang)
		spu_gang_add_ctx(gang, ctx);
	ctx->rt_priority = current->rt_priority;
	ctx->policy = current->policy;
	ctx->prio = current->prio;
	INIT_DELAYED_WORK(&ctx->sched_work, spu_sched_tick);
	goto out;

out_free:
	kfree(ctx);
	ctx = NULL;
out:
	return ctx;
}

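/*
 * alloc_spu_context() above snapshots the caller's scheduling
 * parameters and initializes sched_work without arming it.  A hedged
 * sketch of the helpers that would arm and cancel that work follows;
 * the workqueue (spu_sched_wq) and the timeslice length
 * (SPU_TIMESLICE) are assumed names, not defined in this file.
 */
void spu_start_tick(struct spu_context *ctx)
{
	/* only SCHED_RR contexts are timesliced for now */
	if (ctx->policy == SCHED_RR)
		queue_delayed_work(spu_sched_wq, &ctx->sched_work,
				   SPU_TIMESLICE);
}

void spu_stop_tick(struct spu_context *ctx)
{
	if (ctx->policy == SCHED_RR)
		cancel_delayed_work(&ctx->sched_work);
}
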
void destroy_spu_context(struct kref *kref)
{
	struct spu_context *ctx;
	ctx = container_of(kref, struct spu_context, kref);
	mutex_lock(&ctx->state_mutex);
	spu_deactivate(ctx);
	mutex_unlock(&ctx->state_mutex);
	spu_fini_csa(&ctx->csa);
	if (ctx->gang)
		spu_gang_remove_ctx(ctx->gang, ctx);
	kfree(ctx);
}

struct spu_context *get_spu_context(struct spu_context *ctx)
{
	kref_get(&ctx->kref);
	return ctx;
}

int put_spu_context(struct spu_context *ctx)
{
	return kref_put(&ctx->kref, &destroy_spu_context);
}

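/*
 * Illustrative (hypothetical) caller of the refcounting pair above:
 * take a reference while handing the context around, drop it when
 * done.  kref_put() returns nonzero once the last reference is gone
 * and destroy_spu_context() has run.
 */
static void example_ctx_user(struct spu_context *ctx)
{
	/* pin the context while we work on it */
	struct spu_context *ref = get_spu_context(ctx);

	/* ... use ref ... */

	put_spu_context(ref);	/* may free ctx if this was the last ref */
}
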
/* give up the mm reference when the context is about to be destroyed */
void spu_forget(struct spu_context *ctx)
{
	struct mm_struct *mm;
	spu_acquire_saved(ctx);
	mm = ctx->owner;
	ctx->owner = NULL;
	mmput(mm);
	spu_release(ctx);
}

void spu_unmap_mappings(struct spu_context *ctx)
{
	if (ctx->local_store)
		unmap_mapping_range(ctx->local_store, 0, LS_SIZE, 1);
	if (ctx->mfc)
		unmap_mapping_range(ctx->mfc, 0, 0x1000, 1);
	if (ctx->cntl)
		unmap_mapping_range(ctx->cntl, 0, 0x1000, 1);
	if (ctx->signal1)
		unmap_mapping_range(ctx->signal1, 0, PAGE_SIZE, 1);
	if (ctx->signal2)
		unmap_mapping_range(ctx->signal2, 0, PAGE_SIZE, 1);
	if (ctx->mss)
		unmap_mapping_range(ctx->mss, 0, 0x1000, 1);
	if (ctx->psmap)
		unmap_mapping_range(ctx->psmap, 0, 0x20000, 1);
}

/**
 * spu_acquire_exclusive - lock spu context and protect against userspace access
 * @ctx: spu context to lock
 *
 * Note:
 * Returns 0 with the context locked on success.
 * Returns a negative error with the context _unlocked_ on failure.
 */
int spu_acquire_exclusive(struct spu_context *ctx)
{
	int ret = -EINVAL;

	spu_acquire(ctx);
	/*
	 * Context is about to be freed, so we can't acquire it anymore.
	 */
	if (!ctx->owner)
		goto out_unlock;

	if (ctx->state == SPU_STATE_SAVED) {
		ret = spu_activate(ctx, 0);
		if (ret)
			goto out_unlock;
	} else {
		/*
		 * We need to exclude userspace access to the context.
		 *
		 * To protect against memory access we invalidate all ptes
		 * and make sure the pagefault handlers block on the mutex.
		 */
		spu_unmap_mappings(ctx);
	}

	return 0;

out_unlock:
	spu_release(ctx);
	return ret;
}

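/*
 * Hypothetical caller, illustrating the locking contract documented
 * above: the context stays locked only on the success path, so only
 * that path calls spu_release().
 */
static int example_exclusive_op(struct spu_context *ctx)
{
	int ret = spu_acquire_exclusive(ctx);
	if (ret)
		return ret;		/* ctx is already unlocked here */

	/* ... access the context with userspace mappings excluded ... */

	spu_release(ctx);
	return 0;
}
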
/**
 * spu_acquire_runnable - lock spu context and make sure it is in runnable state
 * @ctx: spu context to lock
 *
 * Note:
 * Returns 0 with the context locked on success.
 * Returns a negative error with the context _unlocked_ on failure.
 */
int spu_acquire_runnable(struct spu_context *ctx, unsigned long flags)
{
	int ret = -EINVAL;

	spu_acquire(ctx);
	if (ctx->state == SPU_STATE_SAVED) {
		/*
		 * Context is about to be freed, so we can't acquire it anymore.
		 */
		if (!ctx->owner)
			goto out_unlock;
		ret = spu_activate(ctx, flags);
		if (ret)
			goto out_unlock;
	}

	return 0;

out_unlock:
	spu_release(ctx);
	return ret;
}

/**
 * spu_acquire_saved - lock spu context and make sure it is in saved state
 * @ctx: spu context to lock
 */
void spu_acquire_saved(struct spu_context *ctx)
{
	spu_acquire(ctx);
	if (ctx->state != SPU_STATE_SAVED)
		spu_deactivate(ctx);
}