greybus: use alloc_gbuf_data() for both directions
Change the "direction" flag field of a gbuf to be a Boolean called "outbound". Add a Boolean outbound flag to alloc_gbuf_data(), and use it for allocating the data buffer for gbufs for data being transferred in either direction. Update free_gbuf_data() accordingly--letting the host device driver's gbuf data free function handle all of them. Signed-off-by: Alex Elder <elder@linaro.org> Signed-off-by: Greg Kroah-Hartman <greg@kroah.com>
commit 9a6f6314d1 (parent 6eb3f4bdec)
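As an editor's illustration (not part of the commit) of the reworked allocation call: after this change a caller picks the data direction when it allocates a gbuf, and the host driver's alloc_gbuf_data() sizes the buffer accordingly. The names demo_complete, demo_context and size below are placeholders; the rest follows the prototype in the diff.

        /* Editor's sketch: outbound vs. inbound allocation with the new flag. */
        struct gbuf *out_gbuf, *in_gbuf;

        /* Outbound: the host driver (ES1 here) reserves a leading cport-id byte. */
        out_gbuf = greybus_alloc_gbuf(connection, demo_complete, size,
                                      true, GFP_KERNEL, demo_context);

        /* Inbound: same allocator, no cport-id byte reserved. */
        in_gbuf = greybus_alloc_gbuf(connection, demo_complete, size,
                                     false, GFP_ATOMIC, demo_context);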
@@ -93,9 +93,11 @@ static void cport_out_callback(struct urb *urb);
  * void *transfer_buffer;
  * u32 transfer_buffer_length;
  */
-static int alloc_gbuf_data(struct gbuf *gbuf, unsigned int size, gfp_t gfp_mask)
+static int alloc_gbuf_data(struct gbuf *gbuf, unsigned int size,
+                           gfp_t gfp_mask)
 {
         struct es1_ap_dev *es1 = hd_to_es1(gbuf->connection->hd);
+        u32 cport_reserve = gbuf->outbound ? 1 : 0;
         u8 *buffer;
 
         if (size > ES1_GBUF_MSG_SIZE) {
@@ -107,8 +109,12 @@ static int alloc_gbuf_data(struct gbuf *gbuf, unsigned int size, gfp_t gfp_mask)
          * but for ES1, it's so dirt simple, we don't have a choice...
          *
          * Also, do a "slow" allocation now, if we need speed, use a cache
+         *
+         * For ES1 outbound buffers need to insert their target
+         * CPort Id before the data; set aside an extra byte for
+         * that purpose in that case.
          */
-        buffer = kmalloc(size + 1, gfp_mask);
+        buffer = kmalloc(cport_reserve + size, gfp_mask);
         if (!buffer)
                 return -ENOMEM;
 
@@ -123,8 +129,10 @@ static int alloc_gbuf_data(struct gbuf *gbuf, unsigned int size, gfp_t gfp_mask)
                 return -EINVAL;
         }
 
-        buffer[0] = gbuf->connection->interface_cport_id;
-        gbuf->transfer_buffer = &buffer[1];
+        /* Insert the cport id for outbound buffers */
+        if (gbuf->outbound)
+                *buffer++ = gbuf->connection->interface_cport_id;
+        gbuf->transfer_buffer = buffer;
         gbuf->transfer_buffer_length = size;
 
         /* When we send the gbuf, we need this pointer to be here */
@@ -136,15 +144,15 @@ static int alloc_gbuf_data(struct gbuf *gbuf, unsigned int size, gfp_t gfp_mask)
 /* Free the memory we allocated with a gbuf */
 static void free_gbuf_data(struct gbuf *gbuf)
 {
-        u8 *transfer_buffer;
-        u8 *buffer;
+        u8 *transfer_buffer = gbuf->transfer_buffer;
 
-        transfer_buffer = gbuf->transfer_buffer;
         /* Can be called with a NULL transfer_buffer on some error paths */
-        if (transfer_buffer) {
-                buffer = &transfer_buffer[-1];        /* yes, we mean -1 */
-                kfree(buffer);
-        }
+        if (!transfer_buffer)
+                return;
+
+        if (gbuf->outbound)
+                transfer_buffer--;        /* Back up to cport id */
+        kfree(transfer_buffer);
 }
 
 #define ES1_TIMEOUT        500        /* 500 ms for the SVC to do something */
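The pointer arithmetic that the alloc_gbuf_data()/free_gbuf_data() hunks above implement can be sketched stand-alone as follows. This is an editor's illustration in plain userspace C, with malloc/free standing in for kmalloc/kfree and made-up demo_* names; it is not driver code.

        #include <stdbool.h>
        #include <stdlib.h>

        /* Reserve one leading byte for the cport id on outbound buffers and
         * return a pointer to the payload, mirroring alloc_gbuf_data() above. */
        static void *demo_alloc(size_t size, bool outbound, unsigned char cport_id)
        {
                unsigned char *buffer = malloc((outbound ? 1 : 0) + size);

                if (!buffer)
                        return NULL;
                if (outbound)
                        *buffer++ = cport_id;   /* payload starts after the cport id */
                return buffer;                  /* caller only ever sees the payload */
        }

        /* Back up to the start of the allocation before freeing, mirroring
         * free_gbuf_data() above. */
        static void demo_free(void *payload, bool outbound)
        {
                unsigned char *buffer = payload;

                if (!buffer)
                        return;
                if (outbound)
                        buffer--;               /* step back over the cport id byte */
                free(buffer);
        }

The invariant mirrored here is that transfer_buffer always points at the payload, while the underlying allocation may begin one byte earlier for outbound gbufs.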
@@ -27,6 +27,7 @@ static struct kmem_cache *gbuf_head_cache;
 static struct workqueue_struct *gbuf_workqueue;
 
 static struct gbuf *__alloc_gbuf(struct gb_connection *connection,
+                                 bool outbound,
                                  gbuf_complete_t complete,
                                  gfp_t gfp_mask,
                                  void *context)
@@ -40,6 +41,7 @@ static struct gbuf *__alloc_gbuf(struct gb_connection *connection,
         kref_init(&gbuf->kref);
         gbuf->connection = connection;
         INIT_WORK(&gbuf->event, cport_process_event);
+        gbuf->outbound = outbound;
         gbuf->complete = complete;
         gbuf->context = context;
 
@@ -64,18 +66,17 @@ static struct gbuf *__alloc_gbuf(struct gb_connection *connection,
 struct gbuf *greybus_alloc_gbuf(struct gb_connection *connection,
                                 gbuf_complete_t complete,
                                 unsigned int size,
+                                bool outbound,
                                 gfp_t gfp_mask,
                                 void *context)
 {
         struct gbuf *gbuf;
         int retval;
 
-        gbuf = __alloc_gbuf(connection, complete, gfp_mask, context);
+        gbuf = __alloc_gbuf(connection, outbound, complete, gfp_mask, context);
         if (!gbuf)
                 return NULL;
 
-        gbuf->direction = GBUF_DIRECTION_OUT;
-
         /* Host controller specific allocation for the actual buffer */
         retval = connection->hd->driver->alloc_gbuf_data(gbuf, size, gfp_mask);
         if (retval) {
@@ -93,13 +94,7 @@ static void free_gbuf(struct kref *kref)
 {
         struct gbuf *gbuf = container_of(kref, struct gbuf, kref);
 
-        /* If the direction is "out" then the host controller frees the data */
-        if (gbuf->direction == GBUF_DIRECTION_OUT) {
-                gbuf->connection->hd->driver->free_gbuf_data(gbuf);
-        } else {
-                /* we "own" this in data, so free it ourselves */
-                kfree(gbuf->transfer_buffer);
-        }
+        gbuf->connection->hd->driver->free_gbuf_data(gbuf);
 
         kmem_cache_free(gbuf_head_cache, gbuf);
 }
@@ -197,14 +192,14 @@ void greybus_cport_in(struct greybus_host_device *hd, u16 cport_id,
                 return;
         }
 
-        gbuf = __alloc_gbuf(connection, ch->handler, GFP_ATOMIC, ch->context);
+        gbuf = greybus_alloc_gbuf(connection, ch->handler, length, false,
+                                  GFP_ATOMIC, ch->context);
         if (!gbuf) {
                 /* Again, something bad went wrong, log it... */
                 pr_err("can't allocate gbuf???\n");
                 return;
         }
         gbuf->hdpriv = hd;
-        gbuf->direction = GBUF_DIRECTION_IN;
 
         /*
          * FIXME:
@@ -212,13 +207,7 @@
          * be, we should move to a model where the hd "owns" all buffers, but we
          * want something up and working first for now.
          */
-        gbuf->transfer_buffer = kmalloc(length, GFP_ATOMIC);
-        if (!gbuf->transfer_buffer) {
-                kfree(gbuf);
-                return;
-        }
         memcpy(gbuf->transfer_buffer, data, length);
-        gbuf->transfer_buffer_length = length;
         gbuf->actual_length = length;
 
         queue_work(gbuf_workqueue, &gbuf->event);
@@ -133,9 +133,7 @@ struct gbuf {
         u32 transfer_buffer_length;
         u32 actual_length;
 
-#define GBUF_DIRECTION_OUT        0
-#define GBUF_DIRECTION_IN        1
-        unsigned int direction : 1;        /* 0 is out, 1 is in */
+        bool outbound;                        /* AP-relative data direction */
 
         void *context;
         struct work_struct event;
@@ -172,7 +170,8 @@ struct svc_msg;
 struct greybus_host_driver {
         size_t hd_priv_size;
 
-        int (*alloc_gbuf_data)(struct gbuf *gbuf, unsigned int size, gfp_t gfp_mask);
+        int (*alloc_gbuf_data)(struct gbuf *gbuf, unsigned int size,
+                               gfp_t gfp_mask);
         void (*free_gbuf_data)(struct gbuf *gbuf);
         int (*submit_svc)(struct svc_msg *svc_msg,
                           struct greybus_host_device *hd);
@@ -202,7 +201,7 @@ void greybus_gbuf_finished(struct gbuf *gbuf);
 
 struct gbuf *greybus_alloc_gbuf(struct gb_connection *connection,
                                 gbuf_complete_t complete, unsigned int size,
-                                gfp_t gfp_mask, void *context);
+                                bool outbound, gfp_t gfp_mask, void *context);
 void greybus_free_gbuf(struct gbuf *gbuf);
 struct gbuf *greybus_get_gbuf(struct gbuf *gbuf);
 #define greybus_put_gbuf        greybus_free_gbuf
@@ -125,7 +125,7 @@ struct gb_operation *gb_operation_create(struct gb_connection *connection,
         /* Our buffer holds a header in addition to the requested payload */
         size += sizeof(*header);
         gbuf = greybus_alloc_gbuf(connection, gbuf_out_callback, size,
-                                  GFP_KERNEL, operation);
+                                  true, GFP_KERNEL, operation);
         if (gbuf) {
                 kfree(operation);
                 return NULL;