staging: ozwpan: Added device state support

Added support for maintaining state and data buffering for devices
connected via the network.

Signed-off-by: Chris Kelly <ckelly@ozmodevices.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Author: Chris Kelly <ckelly@ozmodevices.com>
Date: 2012-02-20 21:11:37 +00:00
Committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 1619cb6f2d
commit bc3157dde3
4 changed files with 1365 additions and 0 deletions

drivers/staging/ozwpan/ozeltbuf.c (new file)

@@ -0,0 +1,339 @@
/* -----------------------------------------------------------------------------
* Copyright (c) 2011 Ozmo Inc
* Released under the GNU General Public License Version 2 (GPLv2).
* -----------------------------------------------------------------------------
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include "ozconfig.h"
#include "ozprotocol.h"
#include "ozeltbuf.h"
#include "ozpd.h"
#include "oztrace.h"
#include "ozalloc.h"
/*------------------------------------------------------------------------------
*/
#define OZ_ELT_INFO_MAGIC_USED 0x35791057
#define OZ_ELT_INFO_MAGIC_FREE 0x78940102
/*------------------------------------------------------------------------------
* Context: softirq-serialized
*/
int oz_elt_buf_init(struct oz_elt_buf *buf)
{
memset(buf, 0, sizeof(struct oz_elt_buf));
INIT_LIST_HEAD(&buf->stream_list);
INIT_LIST_HEAD(&buf->order_list);
INIT_LIST_HEAD(&buf->isoc_list);
buf->max_free_elts = 32;
spin_lock_init(&buf->lock);
return 0;
}
/*------------------------------------------------------------------------------
* Context: softirq or process
*/
void oz_elt_buf_term(struct oz_elt_buf *buf)
{
struct list_head *e;
int i;
/* Free any elements in the order or isoc lists. */
for (i = 0; i < 2; i++) {
struct list_head *list;
if (i)
list = &buf->order_list;
else
list = &buf->isoc_list;
e = list->next;
while (e != list) {
struct oz_elt_info *ei =
container_of(e, struct oz_elt_info, link_order);
e = e->next;
oz_free(ei);
}
}
/* Free any elements in the pool. */
while (buf->elt_pool) {
struct oz_elt_info *ei =
container_of(buf->elt_pool, struct oz_elt_info, link);
buf->elt_pool = buf->elt_pool->next;
oz_free(ei);
}
buf->free_elts = 0;
}
/*------------------------------------------------------------------------------
* Context: softirq or process
*/
struct oz_elt_info *oz_elt_info_alloc(struct oz_elt_buf *buf)
{
struct oz_elt_info *ei = 0;
spin_lock_bh(&buf->lock);
if (buf->free_elts && buf->elt_pool) {
ei = container_of(buf->elt_pool, struct oz_elt_info, link);
buf->elt_pool = ei->link.next;
buf->free_elts--;
spin_unlock_bh(&buf->lock);
if (ei->magic != OZ_ELT_INFO_MAGIC_FREE) {
oz_trace("oz_elt_info_alloc: ei with bad magic: 0x%x\n",
ei->magic);
}
} else {
spin_unlock_bh(&buf->lock);
ei = oz_alloc(sizeof(struct oz_elt_info), GFP_ATOMIC);
}
if (ei) {
ei->flags = 0;
ei->app_id = 0;
ei->callback = 0;
ei->context = 0;
ei->stream = 0;
ei->magic = OZ_ELT_INFO_MAGIC_USED;
INIT_LIST_HEAD(&ei->link);
INIT_LIST_HEAD(&ei->link_order);
}
return ei;
}
/*------------------------------------------------------------------------------
* Precondition: oz_elt_buf.lock must be held.
* Context: softirq or process
*/
void oz_elt_info_free(struct oz_elt_buf *buf, struct oz_elt_info *ei)
{
if (ei) {
if (ei->magic == OZ_ELT_INFO_MAGIC_USED) {
buf->free_elts++;
ei->link.next = buf->elt_pool;
buf->elt_pool = &ei->link;
ei->magic = OZ_ELT_INFO_MAGIC_FREE;
} else {
oz_trace("oz_elt_info_free: bad magic ei: %p"
" magic: 0x%x\n",
ei, ei->magic);
}
}
}
/*------------------------------------------------------------------------------
* Context: softirq
*/
void oz_elt_info_free_chain(struct oz_elt_buf *buf, struct list_head *list)
{
struct list_head *e;
e = list->next;
spin_lock_bh(&buf->lock);
while (e != list) {
struct oz_elt_info *ei;
ei = container_of(e, struct oz_elt_info, link);
e = e->next;
oz_elt_info_free(buf, ei);
}
spin_unlock_bh(&buf->lock);
}
/*------------------------------------------------------------------------------
*/
int oz_elt_stream_create(struct oz_elt_buf *buf, u8 id, int max_buf_count)
{
struct oz_elt_stream *st =
oz_alloc(sizeof(struct oz_elt_stream), GFP_ATOMIC | __GFP_ZERO);
oz_trace("oz_elt_stream_create(0x%x)\n", id);
if (st == 0)
return -1;
atomic_set(&st->ref_count, 1);
st->id = id;
st->max_buf_count = max_buf_count;
INIT_LIST_HEAD(&st->elt_list);
spin_lock_bh(&buf->lock);
list_add_tail(&st->link, &buf->stream_list);
spin_unlock_bh(&buf->lock);
return 0;
}
/*------------------------------------------------------------------------------
*/
int oz_elt_stream_delete(struct oz_elt_buf *buf, u8 id)
{
struct list_head *e;
struct oz_elt_stream *st;
oz_trace("oz_elt_stream_delete(0x%x)\n", id);
spin_lock_bh(&buf->lock);
e = buf->stream_list.next;
while (e != &buf->stream_list) {
st = container_of(e, struct oz_elt_stream, link);
if (st->id == id) {
list_del(e);
break;
}
st = 0;
}
if (!st) {
spin_unlock_bh(&buf->lock);
return -1;
}
e = st->elt_list.next;
while (e != &st->elt_list) {
struct oz_elt_info *ei =
container_of(e, struct oz_elt_info, link);
e = e->next;
list_del_init(&ei->link);
list_del_init(&ei->link_order);
st->buf_count -= ei->length;
oz_trace2(OZ_TRACE_STREAM, "Stream down: %d %d %d\n",
st->buf_count,
ei->length, atomic_read(&st->ref_count));
oz_elt_stream_put(st);
oz_elt_info_free(buf, ei);
}
spin_unlock_bh(&buf->lock);
oz_elt_stream_put(st);
return 0;
}
/*------------------------------------------------------------------------------
*/
void oz_elt_stream_get(struct oz_elt_stream *st)
{
atomic_inc(&st->ref_count);
}
/*------------------------------------------------------------------------------
*/
void oz_elt_stream_put(struct oz_elt_stream *st)
{
if (atomic_dec_and_test(&st->ref_count)) {
oz_trace("Stream destroyed\n");
oz_free(st);
}
}
/*------------------------------------------------------------------------------
* Precondition: Element buffer lock must be held.
* If this function fails the caller is responsible for deallocating the elt
* info structure.
*/
int oz_queue_elt_info(struct oz_elt_buf *buf, u8 isoc, u8 id,
struct oz_elt_info *ei)
{
struct oz_elt_stream *st = 0;
struct list_head *e;
if (id) {
list_for_each(e, &buf->stream_list) {
st = container_of(e, struct oz_elt_stream, link);
if (st->id == id)
break;
}
if (e == &buf->stream_list) {
/* Stream specified but stream not known so fail.
* Caller deallocates element info. */
return -1;
}
}
if (st) {
/* If this is an ISOC fixed element that needs a frame number
* then insert that now. Earlier we stored the unit count in
* this field.
*/
struct oz_isoc_fixed *body = (struct oz_isoc_fixed *)
&ei->data[sizeof(struct oz_elt)];
if ((body->app_id == OZ_APPID_USB) && (body->type
== OZ_USB_ENDPOINT_DATA) &&
(body->format == OZ_DATA_F_ISOC_FIXED)) {
u8 unit_count = body->frame_number;
body->frame_number = st->frame_number;
st->frame_number += unit_count;
}
/* Claim stream and update accounts */
oz_elt_stream_get(st);
ei->stream = st;
st->buf_count += ei->length;
/* Add to list in stream. */
list_add_tail(&ei->link, &st->elt_list);
oz_trace2(OZ_TRACE_STREAM, "Stream up: %d %d\n",
st->buf_count, ei->length);
/* Check if we have too much buffered for this stream. If so
* start dropping elements until we are back in bounds.
*/
while ((st->buf_count > st->max_buf_count) &&
!list_empty(&st->elt_list)) {
struct oz_elt_info *ei2 =
list_first_entry(&st->elt_list,
struct oz_elt_info, link);
list_del_init(&ei2->link);
list_del_init(&ei2->link_order);
st->buf_count -= ei2->length;
oz_elt_info_free(buf, ei2);
oz_elt_stream_put(st);
}
}
list_add_tail(&ei->link_order, isoc ?
&buf->isoc_list : &buf->order_list);
return 0;
}
/*------------------------------------------------------------------------------
*/
int oz_select_elts_for_tx(struct oz_elt_buf *buf, u8 isoc, unsigned *len,
unsigned max_len, struct list_head *list)
{
int count = 0;
struct list_head *e;
struct list_head *el;
struct oz_elt_info *ei;
spin_lock_bh(&buf->lock);
if (isoc)
el = &buf->isoc_list;
else
el = &buf->order_list;
e = el->next;
while (e != el) {
struct oz_app_hdr *app_hdr;
ei = container_of(e, struct oz_elt_info, link_order);
e = e->next;
if ((*len + ei->length) <= max_len) {
app_hdr = (struct oz_app_hdr *)
&ei->data[sizeof(struct oz_elt)];
app_hdr->elt_seq_num = buf->tx_seq_num[ei->app_id]++;
if (buf->tx_seq_num[ei->app_id] == 0)
buf->tx_seq_num[ei->app_id] = 1;
*len += ei->length;
list_del(&ei->link);
list_del(&ei->link_order);
if (ei->stream) {
ei->stream->buf_count -= ei->length;
oz_trace2(OZ_TRACE_STREAM,
"Stream down: %d %d\n",
ei->stream->buf_count, ei->length);
oz_elt_stream_put(ei->stream);
ei->stream = 0;
}
INIT_LIST_HEAD(&ei->link_order);
list_add_tail(&ei->link, list);
count++;
} else {
break;
}
}
spin_unlock_bh(&buf->lock);
return count;
}
/*------------------------------------------------------------------------------
*/
int oz_are_elts_available(struct oz_elt_buf *buf)
{
return buf->order_list.next != &buf->order_list;
}
/*------------------------------------------------------------------------------
*/
void oz_trim_elt_pool(struct oz_elt_buf *buf)
{
struct list_head *free = 0;
struct list_head *e;
spin_lock_bh(&buf->lock);
while (buf->free_elts > buf->max_free_elts) {
e = buf->elt_pool;
buf->elt_pool = e->next;
e->next = free;
free = e;
buf->free_elts--;
}
spin_unlock_bh(&buf->lock);
while (free) {
struct oz_elt_info *ei =
container_of(free, struct oz_elt_info, link);
free = free->next;
oz_free(ei);
}
}

drivers/staging/ozwpan/ozeltbuf.h (new file)

@@ -0,0 +1,70 @@
/* -----------------------------------------------------------------------------
* Copyright (c) 2011 Ozmo Inc
* Released under the GNU General Public License Version 2 (GPLv2).
* -----------------------------------------------------------------------------
*/
#ifndef _OZELTBUF_H
#define _OZELTBUF_H
#include "ozprotocol.h"
/*-----------------------------------------------------------------------------
*/
struct oz_pd;
typedef void (*oz_elt_callback_t)(struct oz_pd *pd, long context);
struct oz_elt_stream {
struct list_head link;
struct list_head elt_list;
atomic_t ref_count;
unsigned buf_count;
unsigned max_buf_count;
u8 frame_number;
u8 id;
};
#define OZ_MAX_ELT_PAYLOAD 255
struct oz_elt_info {
struct list_head link;
struct list_head link_order;
u8 flags;
u8 app_id;
oz_elt_callback_t callback;
long context;
struct oz_elt_stream *stream;
u8 data[sizeof(struct oz_elt) + OZ_MAX_ELT_PAYLOAD];
int length;
unsigned magic;
};
/* Flags values */
#define OZ_EI_F_MARKED 0x1
struct oz_elt_buf {
spinlock_t lock;
struct list_head stream_list;
struct list_head order_list;
struct list_head isoc_list;
struct list_head *elt_pool;
int free_elts;
int max_free_elts;
u8 tx_seq_num[OZ_NB_APPS];
};
int oz_elt_buf_init(struct oz_elt_buf *buf);
void oz_elt_buf_term(struct oz_elt_buf *buf);
struct oz_elt_info *oz_elt_info_alloc(struct oz_elt_buf *buf);
void oz_elt_info_free(struct oz_elt_buf *buf, struct oz_elt_info *ei);
void oz_elt_info_free_chain(struct oz_elt_buf *buf, struct list_head *list);
int oz_elt_stream_create(struct oz_elt_buf *buf, u8 id, int max_buf_count);
int oz_elt_stream_delete(struct oz_elt_buf *buf, u8 id);
void oz_elt_stream_get(struct oz_elt_stream *st);
void oz_elt_stream_put(struct oz_elt_stream *st);
int oz_queue_elt_info(struct oz_elt_buf *buf, u8 isoc, u8 id,
struct oz_elt_info *ei);
int oz_select_elts_for_tx(struct oz_elt_buf *buf, u8 isoc, unsigned *len,
unsigned max_len, struct list_head *list);
int oz_are_elts_available(struct oz_elt_buf *buf);
void oz_trim_elt_pool(struct oz_elt_buf *buf);
#endif /* _OZELTBUF_H */
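
For orientation only (this block is not part of the commit): a minimal sketch of how a service might drive the element-buffer API declared above, i.e. initialise the buffer, create a stream, queue one element, then pull queued elements out for frame building. The function names and signatures come from ozeltbuf.h; the stream id, app id, buffer limit and payload handling are illustrative assumptions.

#include <linux/list.h>
#include <linux/spinlock.h>
#include "ozprotocol.h"
#include "ozeltbuf.h"

/* Illustrative only: queue one element on stream 1, then collect up to
 * max_tx bytes of queued elements onto tx_list for frame building.
 */
static int example_elt_buf_usage(struct oz_elt_buf *buf, unsigned max_tx,
	struct list_head *tx_list)
{
	struct oz_elt_info *ei;
	unsigned len = 0;

	/* One-time setup: init the buffer and create stream id 1 with a
	 * 4 KiB buffering limit (both values made up for the example).
	 */
	if (oz_elt_buf_init(buf))
		return -1;
	if (oz_elt_stream_create(buf, 1, 4096))
		return -1;

	ei = oz_elt_info_alloc(buf);
	if (ei == NULL)
		return -1;
	/* A real service writes an oz_elt header plus its payload into
	 * ei->data at this point; only the bookkeeping fields are shown.
	 */
	ei->app_id = OZ_APPID_SERIAL;
	ei->length = sizeof(struct oz_elt) + sizeof(struct oz_app_hdr);

	spin_lock_bh(&buf->lock);	/* oz_queue_elt_info() expects the lock held */
	if (oz_queue_elt_info(buf, 0, 1, ei)) {
		oz_elt_info_free(buf, ei);	/* caller frees on failure */
		spin_unlock_bh(&buf->lock);
		return -1;
	}
	spin_unlock_bh(&buf->lock);

	/* Move as many queued elements as fit in max_tx onto tx_list;
	 * the return value is the number of elements selected.
	 */
	return oz_select_elts_for_tx(buf, 0, &len, max_tx, tx_list);
}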

drivers/staging/ozwpan/ozpd.c (new file)

@@ -0,0 +1,835 @@
/* -----------------------------------------------------------------------------
* Copyright (c) 2011 Ozmo Inc
* Released under the GNU General Public License Version 2 (GPLv2).
* -----------------------------------------------------------------------------
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
#include <linux/errno.h>
#include "ozconfig.h"
#include "ozprotocol.h"
#include "ozeltbuf.h"
#include "ozpd.h"
#include "ozproto.h"
#include "oztrace.h"
#include "ozalloc.h"
#include "ozevent.h"
#include "ozcdev.h"
#include "ozusbsvc.h"
#include <asm/unaligned.h>
#include <linux/uaccess.h>
#include <net/psnap.h>
/*------------------------------------------------------------------------------
*/
#define OZ_MAX_TX_POOL_SIZE 6
/* Maximum number of uncompleted isoc frames that can be pending.
*/
#define OZ_MAX_SUBMITTED_ISOC 16
/*------------------------------------------------------------------------------
*/
static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd);
static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f);
static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f);
static int oz_send_isoc_frame(struct oz_pd *pd);
static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f);
static void oz_isoc_stream_free(struct oz_isoc_stream *st);
static int oz_send_next_queued_frame(struct oz_pd *pd, int *more_data);
static void oz_isoc_destructor(struct sk_buff *skb);
static int oz_def_app_init(void);
static void oz_def_app_term(void);
static int oz_def_app_start(struct oz_pd *pd, int resume);
static void oz_def_app_stop(struct oz_pd *pd, int pause);
static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt);
/*------------------------------------------------------------------------------
* Counts the uncompleted isoc frames submitted to the network card.
*/
static atomic_t g_submitted_isoc = ATOMIC_INIT(0);
/* Application handler functions.
*/
static struct oz_app_if g_app_if[OZ_APPID_MAX] = {
{oz_usb_init,
oz_usb_term,
oz_usb_start,
oz_usb_stop,
oz_usb_rx,
oz_usb_heartbeat,
oz_usb_farewell,
OZ_APPID_USB},
{oz_def_app_init,
oz_def_app_term,
oz_def_app_start,
oz_def_app_stop,
oz_def_app_rx,
0,
0,
OZ_APPID_UNUSED1},
{oz_def_app_init,
oz_def_app_term,
oz_def_app_start,
oz_def_app_stop,
oz_def_app_rx,
0,
0,
OZ_APPID_UNUSED2},
{oz_cdev_init,
oz_cdev_term,
oz_cdev_start,
oz_cdev_stop,
oz_cdev_rx,
0,
0,
OZ_APPID_SERIAL},
};
/*------------------------------------------------------------------------------
* Context: process
*/
static int oz_def_app_init(void)
{
return 0;
}
/*------------------------------------------------------------------------------
* Context: process
*/
static void oz_def_app_term(void)
{
}
/*------------------------------------------------------------------------------
* Context: softirq
*/
static int oz_def_app_start(struct oz_pd *pd, int resume)
{
return 0;
}
/*------------------------------------------------------------------------------
* Context: softirq
*/
static void oz_def_app_stop(struct oz_pd *pd, int pause)
{
}
/*------------------------------------------------------------------------------
* Context: softirq
*/
static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt)
{
}
/*------------------------------------------------------------------------------
* Context: softirq or process
*/
void oz_pd_set_state(struct oz_pd *pd, unsigned state)
{
pd->state = state;
oz_event_log(OZ_EVT_PD_STATE, 0, 0, 0, state);
#ifdef WANT_TRACE
switch (state) {
case OZ_PD_S_IDLE:
oz_trace("PD State: OZ_PD_S_IDLE\n");
break;
case OZ_PD_S_CONNECTED:
oz_trace("PD State: OZ_PD_S_CONNECTED\n");
break;
case OZ_PD_S_STOPPED:
oz_trace("PD State: OZ_PD_S_STOPPED\n");
break;
case OZ_PD_S_SLEEP:
oz_trace("PD State: OZ_PD_S_SLEEP\n");
break;
}
#endif /* WANT_TRACE */
}
/*------------------------------------------------------------------------------
* Context: softirq or process
*/
void oz_pd_get(struct oz_pd *pd)
{
atomic_inc(&pd->ref_count);
}
/*------------------------------------------------------------------------------
* Context: softirq or process
*/
void oz_pd_put(struct oz_pd *pd)
{
if (atomic_dec_and_test(&pd->ref_count))
oz_pd_destroy(pd);
}
/*------------------------------------------------------------------------------
* Context: softirq-serialized
*/
struct oz_pd *oz_pd_alloc(u8 *mac_addr)
{
struct oz_pd *pd = oz_alloc(sizeof(struct oz_pd), GFP_ATOMIC);
if (pd) {
int i;
memset(pd, 0, sizeof(struct oz_pd));
atomic_set(&pd->ref_count, 2);
for (i = 0; i < OZ_APPID_MAX; i++)
spin_lock_init(&pd->app_lock[i]);
pd->last_rx_pkt_num = 0xffffffff;
oz_pd_set_state(pd, OZ_PD_S_IDLE);
pd->max_tx_size = OZ_MAX_TX_SIZE;
memcpy(pd->mac_addr, mac_addr, ETH_ALEN);
if (0 != oz_elt_buf_init(&pd->elt_buff)) {
oz_free(pd);
/* Bail out now; the locks and lists below must not be touched. */
return 0;
}
spin_lock_init(&pd->tx_frame_lock);
INIT_LIST_HEAD(&pd->tx_queue);
INIT_LIST_HEAD(&pd->farewell_list);
pd->last_sent_frame = &pd->tx_queue;
spin_lock_init(&pd->stream_lock);
INIT_LIST_HEAD(&pd->stream_list);
}
return pd;
}
/*------------------------------------------------------------------------------
* Context: softirq or process
*/
void oz_pd_destroy(struct oz_pd *pd)
{
struct list_head *e;
struct oz_tx_frame *f;
struct oz_isoc_stream *st;
struct oz_farewell *fwell;
oz_trace("Destroying PD\n");
/* Delete any streams.
*/
e = pd->stream_list.next;
while (e != &pd->stream_list) {
st = container_of(e, struct oz_isoc_stream, link);
e = e->next;
oz_isoc_stream_free(st);
}
/* Free any queued tx frames.
*/
e = pd->tx_queue.next;
while (e != &pd->tx_queue) {
f = container_of(e, struct oz_tx_frame, link);
e = e->next;
oz_retire_frame(pd, f);
}
oz_elt_buf_term(&pd->elt_buff);
/* Free any farewells.
*/
e = pd->farewell_list.next;
while (e != &pd->farewell_list) {
fwell = container_of(e, struct oz_farewell, link);
e = e->next;
oz_free(fwell);
}
/* Deallocate all frames in tx pool.
*/
while (pd->tx_pool) {
e = pd->tx_pool;
pd->tx_pool = e->next;
oz_free(container_of(e, struct oz_tx_frame, link));
}
if (pd->net_dev)
dev_put(pd->net_dev);
oz_free(pd);
}
/*------------------------------------------------------------------------------
* Context: softirq-serialized
*/
int oz_services_start(struct oz_pd *pd, u16 apps, int resume)
{
struct oz_app_if *ai;
int rc = 0;
oz_trace("oz_services_start(0x%x) resume(%d)\n", apps, resume);
for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
if (apps & (1<<ai->app_id)) {
if (ai->start(pd, resume)) {
rc = -1;
oz_trace("Unabled to start service %d\n",
ai->app_id);
break;
}
oz_polling_lock_bh();
pd->total_apps |= (1<<ai->app_id);
if (resume)
pd->paused_apps &= ~(1<<ai->app_id);
oz_polling_unlock_bh();
}
}
return rc;
}
/*------------------------------------------------------------------------------
* Context: softirq or process
*/
void oz_services_stop(struct oz_pd *pd, u16 apps, int pause)
{
struct oz_app_if *ai;
oz_trace("oz_stop_services(0x%x) pause(%d)\n", apps, pause);
for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
if (apps & (1<<ai->app_id)) {
oz_polling_lock_bh();
if (pause) {
pd->paused_apps |= (1<<ai->app_id);
} else {
pd->total_apps &= ~(1<<ai->app_id);
pd->paused_apps &= ~(1<<ai->app_id);
}
oz_polling_unlock_bh();
ai->stop(pd, pause);
}
}
}
/*------------------------------------------------------------------------------
* Context: softirq
*/
void oz_pd_heartbeat(struct oz_pd *pd, u16 apps)
{
struct oz_app_if *ai;
int more = 0;
for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
if (ai->heartbeat && (apps & (1<<ai->app_id))) {
if (ai->heartbeat(pd))
more = 1;
}
}
if (more)
oz_pd_request_heartbeat(pd);
if (pd->mode & OZ_F_ISOC_ANYTIME) {
int count = 8;
while (count-- && (oz_send_isoc_frame(pd) >= 0))
;
}
}
/*------------------------------------------------------------------------------
* Context: softirq or process
*/
void oz_pd_stop(struct oz_pd *pd)
{
u16 stop_apps = 0;
oz_trace("oz_pd_stop() State = 0x%x\n", pd->state);
oz_pd_indicate_farewells(pd);
oz_polling_lock_bh();
stop_apps = pd->total_apps;
pd->total_apps = 0;
pd->paused_apps = 0;
oz_polling_unlock_bh();
oz_services_stop(pd, stop_apps, 0);
oz_polling_lock_bh();
oz_pd_set_state(pd, OZ_PD_S_STOPPED);
/* Remove from PD list.*/
list_del(&pd->link);
oz_polling_unlock_bh();
oz_trace("pd ref count = %d\n", atomic_read(&pd->ref_count));
oz_timer_delete(pd, 0);
oz_pd_put(pd);
}
/*------------------------------------------------------------------------------
* Context: softirq
*/
int oz_pd_sleep(struct oz_pd *pd)
{
int do_stop = 0;
u16 stop_apps = 0;
oz_polling_lock_bh();
if (pd->state & (OZ_PD_S_SLEEP | OZ_PD_S_STOPPED)) {
oz_polling_unlock_bh();
return 0;
}
if (pd->keep_alive_j && pd->session_id) {
oz_pd_set_state(pd, OZ_PD_S_SLEEP);
pd->pulse_time_j = jiffies + pd->keep_alive_j;
oz_trace("Sleep Now %lu until %lu\n",
jiffies, pd->pulse_time_j);
} else {
do_stop = 1;
}
stop_apps = pd->total_apps;
oz_polling_unlock_bh();
if (do_stop) {
oz_pd_stop(pd);
} else {
oz_services_stop(pd, stop_apps, 1);
oz_timer_add(pd, OZ_TIMER_STOP, jiffies + pd->keep_alive_j, 1);
}
return do_stop;
}
/*------------------------------------------------------------------------------
* Context: softirq
*/
static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd)
{
struct oz_tx_frame *f = 0;
spin_lock_bh(&pd->tx_frame_lock);
if (pd->tx_pool) {
f = container_of(pd->tx_pool, struct oz_tx_frame, link);
pd->tx_pool = pd->tx_pool->next;
pd->tx_pool_count--;
}
spin_unlock_bh(&pd->tx_frame_lock);
if (f == 0)
f = oz_alloc(sizeof(struct oz_tx_frame), GFP_ATOMIC);
if (f) {
f->total_size = sizeof(struct oz_hdr);
INIT_LIST_HEAD(&f->link);
INIT_LIST_HEAD(&f->elt_list);
}
return f;
}
/*------------------------------------------------------------------------------
* Context: softirq or process
*/
static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f)
{
spin_lock_bh(&pd->tx_frame_lock);
if (pd->tx_pool_count < OZ_MAX_TX_POOL_SIZE) {
f->link.next = pd->tx_pool;
pd->tx_pool = &f->link;
pd->tx_pool_count++;
f = 0;
} else {
oz_free(f);
}
spin_unlock_bh(&pd->tx_frame_lock);
if (f)
oz_free(f);
}
/*------------------------------------------------------------------------------
* Context: softirq
*/
int oz_prepare_frame(struct oz_pd *pd, int empty)
{
struct oz_tx_frame *f;
if ((pd->mode & OZ_MODE_MASK) != OZ_MODE_TRIGGERED)
return -1;
if (pd->nb_queued_frames >= OZ_MAX_QUEUED_FRAMES)
return -1;
if (!empty && !oz_are_elts_available(&pd->elt_buff))
return -1;
f = oz_tx_frame_alloc(pd);
if (f == 0)
return -1;
f->hdr.control =
(OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ACK_REQUESTED;
++pd->last_tx_pkt_num;
put_unaligned(cpu_to_le32(pd->last_tx_pkt_num), &f->hdr.pkt_num);
if (empty == 0) {
oz_select_elts_for_tx(&pd->elt_buff, 0, &f->total_size,
pd->max_tx_size, &f->elt_list);
}
spin_lock(&pd->tx_frame_lock);
list_add_tail(&f->link, &pd->tx_queue);
pd->nb_queued_frames++;
spin_unlock(&pd->tx_frame_lock);
return 0;
}
/*------------------------------------------------------------------------------
* Context: softirq-serialized
*/
static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f)
{
struct sk_buff *skb = 0;
struct net_device *dev = pd->net_dev;
struct oz_hdr *oz_hdr;
struct oz_elt *elt;
struct list_head *e;
/* Allocate skb with enough space for the lower layers as well
* as the space we need.
*/
skb = alloc_skb(f->total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
if (skb == 0)
return 0;
/* Reserve the head room for lower layers.
*/
skb_reserve(skb, LL_RESERVED_SPACE(dev));
skb_reset_network_header(skb);
skb->dev = dev;
skb->protocol = htons(OZ_ETHERTYPE);
if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
dev->dev_addr, skb->len) < 0)
goto fail;
/* Push the tail to the end of the area we are going to copy to.
*/
oz_hdr = (struct oz_hdr *)skb_put(skb, f->total_size);
f->hdr.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
memcpy(oz_hdr, &f->hdr, sizeof(struct oz_hdr));
/* Copy the elements into the frame body.
*/
elt = (struct oz_elt *)(oz_hdr+1);
for (e = f->elt_list.next; e != &f->elt_list; e = e->next) {
struct oz_elt_info *ei;
ei = container_of(e, struct oz_elt_info, link);
memcpy(elt, ei->data, ei->length);
elt = oz_next_elt(elt);
}
return skb;
fail:
kfree_skb(skb);
return 0;
}
/*------------------------------------------------------------------------------
* Context: softirq or process
*/
static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f)
{
struct list_head *e;
struct oz_elt_info *ei;
e = f->elt_list.next;
while (e != &f->elt_list) {
ei = container_of(e, struct oz_elt_info, link);
e = e->next;
list_del_init(&ei->link);
if (ei->callback)
ei->callback(pd, ei->context);
spin_lock_bh(&pd->elt_buff.lock);
oz_elt_info_free(&pd->elt_buff, ei);
spin_unlock_bh(&pd->elt_buff.lock);
}
oz_tx_frame_free(pd, f);
if (pd->elt_buff.free_elts > pd->elt_buff.max_free_elts)
oz_trim_elt_pool(&pd->elt_buff);
}
/*------------------------------------------------------------------------------
* Context: softirq-serialized
*/
static int oz_send_next_queued_frame(struct oz_pd *pd, int *more_data)
{
struct sk_buff *skb;
struct oz_tx_frame *f;
struct list_head *e;
*more_data = 0;
spin_lock(&pd->tx_frame_lock);
e = pd->last_sent_frame->next;
if (e == &pd->tx_queue) {
spin_unlock(&pd->tx_frame_lock);
return -1;
}
pd->last_sent_frame = e;
if (e->next != &pd->tx_queue)
*more_data = 1;
f = container_of(e, struct oz_tx_frame, link);
skb = oz_build_frame(pd, f);
spin_unlock(&pd->tx_frame_lock);
oz_trace2(OZ_TRACE_TX_FRAMES, "TX frame PN=0x%x\n", f->hdr.pkt_num);
if (skb) {
oz_event_log(OZ_EVT_TX_FRAME,
0,
(((u16)f->hdr.control)<<8)|f->hdr.last_pkt_num,
0, f->hdr.pkt_num);
if (dev_queue_xmit(skb) < 0)
return -1;
}
return 0;
}
/*------------------------------------------------------------------------------
* Context: softirq-serialized
*/
void oz_send_queued_frames(struct oz_pd *pd, int backlog)
{
int more;
if (backlog < OZ_MAX_QUEUED_FRAMES) {
if (oz_send_next_queued_frame(pd, &more) >= 0) {
while (more && (oz_send_next_queued_frame(pd, &more) >= 0))
;
} else {
if (((pd->mode & OZ_F_ISOC_ANYTIME) == 0)
|| (pd->isoc_sent == 0)) {
if (oz_prepare_frame(pd, 1) >= 0)
oz_send_next_queued_frame(pd, &more);
}
}
} else {
oz_send_next_queued_frame(pd, &more);
}
}
/*------------------------------------------------------------------------------
* Context: softirq
*/
static int oz_send_isoc_frame(struct oz_pd *pd)
{
struct sk_buff *skb = 0;
struct net_device *dev = pd->net_dev;
struct oz_hdr *oz_hdr;
struct oz_elt *elt;
struct list_head *e;
struct list_head list;
int total_size = sizeof(struct oz_hdr);
INIT_LIST_HEAD(&list);
oz_select_elts_for_tx(&pd->elt_buff, 1, &total_size,
pd->max_tx_size, &list);
if (list.next == &list)
return 0;
skb = alloc_skb(total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
if (skb == 0) {
oz_trace("Cannot alloc skb\n");
oz_elt_info_free_chain(&pd->elt_buff, &list);
return -1;
}
skb_reserve(skb, LL_RESERVED_SPACE(dev));
skb_reset_network_header(skb);
skb->dev = dev;
skb->protocol = htons(OZ_ETHERTYPE);
if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
dev->dev_addr, skb->len) < 0) {
kfree_skb(skb);
return -1;
}
oz_hdr = (struct oz_hdr *)skb_put(skb, total_size);
oz_hdr->control = (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
elt = (struct oz_elt *)(oz_hdr+1);
for (e = list.next; e != &list; e = e->next) {
struct oz_elt_info *ei;
ei = container_of(e, struct oz_elt_info, link);
memcpy(elt, ei->data, ei->length);
elt = oz_next_elt(elt);
}
oz_event_log(OZ_EVT_TX_ISOC, 0, 0, 0, 0);
dev_queue_xmit(skb);
oz_elt_info_free_chain(&pd->elt_buff, &list);
return 0;
}
/*------------------------------------------------------------------------------
* Context: softirq-serialized
*/
void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn)
{
struct list_head *e;
struct oz_tx_frame *f;
struct list_head *first = 0;
struct list_head *last = 0;
u8 diff;
u32 pkt_num;
spin_lock(&pd->tx_frame_lock);
e = pd->tx_queue.next;
while (e != &pd->tx_queue) {
f = container_of(e, struct oz_tx_frame, link);
pkt_num = le32_to_cpu(get_unaligned(&f->hdr.pkt_num));
diff = (lpn - (pkt_num & OZ_LAST_PN_MASK)) & OZ_LAST_PN_MASK;
if (diff > OZ_LAST_PN_HALF_CYCLE)
break;
if (first == 0)
first = e;
last = e;
e = e->next;
pd->nb_queued_frames--;
}
if (first) {
last->next->prev = &pd->tx_queue;
pd->tx_queue.next = last->next;
last->next = 0;
}
pd->last_sent_frame = &pd->tx_queue;
spin_unlock(&pd->tx_frame_lock);
while (first) {
f = container_of(first, struct oz_tx_frame, link);
first = first->next;
oz_retire_frame(pd, f);
}
}
/*------------------------------------------------------------------------------
* Precondition: stream_lock must be held.
* Context: softirq
*/
static struct oz_isoc_stream *pd_stream_find(struct oz_pd *pd, u8 ep_num)
{
struct list_head *e;
struct oz_isoc_stream *st;
list_for_each(e, &pd->stream_list) {
st = container_of(e, struct oz_isoc_stream, link);
if (st->ep_num == ep_num)
return st;
}
return 0;
}
/*------------------------------------------------------------------------------
* Context: softirq
*/
int oz_isoc_stream_create(struct oz_pd *pd, u8 ep_num)
{
struct oz_isoc_stream *st =
oz_alloc(sizeof(struct oz_isoc_stream), GFP_ATOMIC);
if (!st)
return -1;
memset(st, 0, sizeof(struct oz_isoc_stream));
st->ep_num = ep_num;
spin_lock_bh(&pd->stream_lock);
if (!pd_stream_find(pd, ep_num)) {
list_add(&st->link, &pd->stream_list);
st = 0;
}
spin_unlock_bh(&pd->stream_lock);
if (st)
oz_free(st);
return 0;
}
/*------------------------------------------------------------------------------
* Context: softirq or process
*/
static void oz_isoc_stream_free(struct oz_isoc_stream *st)
{
if (st->skb)
kfree_skb(st->skb);
oz_free(st);
}
/*------------------------------------------------------------------------------
* Context: softirq
*/
int oz_isoc_stream_delete(struct oz_pd *pd, u8 ep_num)
{
struct oz_isoc_stream *st;
spin_lock_bh(&pd->stream_lock);
st = pd_stream_find(pd, ep_num);
if (st)
list_del(&st->link);
spin_unlock_bh(&pd->stream_lock);
if (st)
oz_isoc_stream_free(st);
return 0;
}
/*------------------------------------------------------------------------------
* Context: any
*/
static void oz_isoc_destructor(struct sk_buff *skb)
{
atomic_dec(&g_submitted_isoc);
oz_event_log(OZ_EVT_TX_ISOC_DONE, atomic_read(&g_submitted_isoc),
0, skb, 0);
}
/*------------------------------------------------------------------------------
* Context: softirq
*/
int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, u8 *data, int len)
{
struct net_device *dev = pd->net_dev;
struct oz_isoc_stream *st;
u8 nb_units = 0;
struct sk_buff *skb = 0;
struct oz_hdr *oz_hdr = 0;
int size = 0;
spin_lock_bh(&pd->stream_lock);
st = pd_stream_find(pd, ep_num);
if (st) {
skb = st->skb;
st->skb = 0;
nb_units = st->nb_units;
st->nb_units = 0;
oz_hdr = st->oz_hdr;
size = st->size;
}
spin_unlock_bh(&pd->stream_lock);
if (!st)
return 0;
if (!skb) {
/* Allocate enough space for max size frame. */
skb = alloc_skb(pd->max_tx_size + OZ_ALLOCATED_SPACE(dev),
GFP_ATOMIC);
if (skb == 0)
return 0;
/* Reserve the head room for lower layers. */
skb_reserve(skb, LL_RESERVED_SPACE(dev));
skb_reset_network_header(skb);
skb->dev = dev;
skb->protocol = htons(OZ_ETHERTYPE);
size = sizeof(struct oz_hdr) + sizeof(struct oz_isoc_large);
oz_hdr = (struct oz_hdr *)skb_put(skb, size);
}
memcpy(skb_put(skb, len), data, len);
size += len;
if (++nb_units < pd->ms_per_isoc) {
spin_lock_bh(&pd->stream_lock);
st->skb = skb;
st->nb_units = nb_units;
st->oz_hdr = oz_hdr;
st->size = size;
spin_unlock_bh(&pd->stream_lock);
} else {
struct oz_hdr oz;
struct oz_isoc_large iso;
spin_lock_bh(&pd->stream_lock);
iso.frame_number = st->frame_num;
st->frame_num += nb_units;
spin_unlock_bh(&pd->stream_lock);
oz.control =
(OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
oz.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
oz.pkt_num = 0;
iso.endpoint = ep_num;
iso.format = OZ_DATA_F_ISOC_LARGE;
iso.ms_data = nb_units;
memcpy(oz_hdr, &oz, sizeof(oz));
memcpy(oz_hdr+1, &iso, sizeof(iso));
if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
dev->dev_addr, skb->len) < 0) {
kfree_skb(skb);
return -1;
}
if (atomic_read(&g_submitted_isoc) < OZ_MAX_SUBMITTED_ISOC) {
skb->destructor = oz_isoc_destructor;
atomic_inc(&g_submitted_isoc);
oz_event_log(OZ_EVT_TX_ISOC, nb_units, iso.frame_number,
skb, atomic_read(&g_submitted_isoc));
if (dev_queue_xmit(skb) < 0)
return -1;
} else {
oz_event_log(OZ_EVT_TX_ISOC_DROP, 0, 0, 0, 0);
kfree_skb(skb);
}
}
return 0;
}
/*------------------------------------------------------------------------------
* Context: process
*/
void oz_apps_init(void)
{
int i;
for (i = 0; i < OZ_APPID_MAX; i++)
if (g_app_if[i].init)
g_app_if[i].init();
}
/*------------------------------------------------------------------------------
* Context: process
*/
void oz_apps_term(void)
{
int i;
/* Terminate all the apps. */
for (i = 0; i < OZ_APPID_MAX; i++)
if (g_app_if[i].term)
g_app_if[i].term();
}
/*------------------------------------------------------------------------------
* Context: softirq-serialized
*/
void oz_handle_app_elt(struct oz_pd *pd, u8 app_id, struct oz_elt *elt)
{
struct oz_app_if *ai;
if (app_id == 0 || app_id > OZ_APPID_MAX)
return;
ai = &g_app_if[app_id-1];
ai->rx(pd, elt);
}
/*------------------------------------------------------------------------------
* Context: softirq or process
*/
void oz_pd_indicate_farewells(struct oz_pd *pd)
{
struct oz_farewell *f;
struct oz_app_if *ai = &g_app_if[OZ_APPID_USB-1];
while (1) {
oz_polling_lock_bh();
if (list_empty(&pd->farewell_list)) {
oz_polling_unlock_bh();
break;
}
f = list_first_entry(&pd->farewell_list,
struct oz_farewell, link);
list_del(&f->link);
oz_polling_unlock_bh();
if (ai->farewell)
ai->farewell(pd, f->ep_num, f->report, f->len);
oz_free(f);
}
}
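
As a rough map of the transmit path implemented above (again, not part of the commit): oz_prepare_frame() batches queued elements into a frame, oz_send_queued_frames() builds sk_buffs for queued frames and hands them to the network device, and oz_retire_tx_frames() releases frames once the peer has acknowledged a packet number. A hedged sketch of a caller, with the acknowledged packet number supplied by the protocol layer (not shown in this commit):

#include "ozpd.h"

/* Illustrative only: flush queued elements for one PD, then retire
 * everything up to the packet number the peer has acknowledged.
 */
static void example_tx_cycle(struct oz_pd *pd, u8 acked_last_pkt_num)
{
	/* empty = 0: only build a frame if elements are actually queued. */
	if (oz_prepare_frame(pd, 0) == 0)
		oz_send_queued_frames(pd, 0);	/* backlog 0: free to transmit */

	/* When an ack arrives, drop the acknowledged frames and run their
	 * element callbacks.
	 */
	oz_retire_tx_frames(pd, acked_last_pkt_num);
}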

drivers/staging/ozwpan/ozpd.h (new file)

@@ -0,0 +1,121 @@
/* -----------------------------------------------------------------------------
* Copyright (c) 2011 Ozmo Inc
* Released under the GNU General Public License Version 2 (GPLv2).
* -----------------------------------------------------------------------------
*/
#ifndef _OZPD_H_
#define _OZPD_H_
#include "ozeltbuf.h"
/* PD state
*/
#define OZ_PD_S_IDLE 0x1
#define OZ_PD_S_CONNECTED 0x2
#define OZ_PD_S_SLEEP 0x4
#define OZ_PD_S_STOPPED 0x8
/* Timer event types.
*/
#define OZ_TIMER_TOUT 1
#define OZ_TIMER_HEARTBEAT 2
#define OZ_TIMER_STOP 3
/* Data structure that holds information on a frame for transmission. This is
* built when the frame is first transmitted and is used to rebuild the frame
* if a re-transmission is required.
*/
struct oz_tx_frame {
struct list_head link;
struct list_head elt_list;
struct oz_hdr hdr;
int total_size;
};
struct oz_isoc_stream {
struct list_head link;
u8 ep_num;
u8 frame_num;
u8 nb_units;
int size;
struct sk_buff *skb;
struct oz_hdr *oz_hdr;
};
struct oz_farewell {
struct list_head link;
u8 ep_num;
u8 index;
u8 report[1];
u8 len;
};
/* Data structure that holds information on a specific peripheral device (PD).
*/
struct oz_pd {
struct list_head link;
atomic_t ref_count;
u8 mac_addr[ETH_ALEN];
unsigned state;
unsigned state_flags;
unsigned send_flags;
u16 total_apps;
u16 paused_apps;
u8 session_id;
u8 param_rsp_status;
u8 pd_info;
u8 isoc_sent;
u32 last_rx_pkt_num;
u32 last_tx_pkt_num;
u32 trigger_pkt_num;
unsigned long pulse_time_j;
unsigned long timeout_time_j;
unsigned long pulse_period_j;
unsigned long presleep_j;
unsigned long keep_alive_j;
unsigned long last_rx_time_j;
struct oz_elt_buf elt_buff;
void *app_ctx[OZ_APPID_MAX];
spinlock_t app_lock[OZ_APPID_MAX];
int max_tx_size;
u8 heartbeat_requested;
u8 mode;
u8 ms_per_isoc;
unsigned max_stream_buffering;
int nb_queued_frames;
struct list_head *tx_pool;
int tx_pool_count;
spinlock_t tx_frame_lock;
struct list_head *last_sent_frame;
struct list_head tx_queue;
struct list_head farewell_list;
spinlock_t stream_lock;
struct list_head stream_list;
struct net_device *net_dev;
};
#define OZ_MAX_QUEUED_FRAMES 4
struct oz_pd *oz_pd_alloc(u8 *mac_addr);
void oz_pd_destroy(struct oz_pd *pd);
void oz_pd_get(struct oz_pd *pd);
void oz_pd_put(struct oz_pd *pd);
void oz_pd_set_state(struct oz_pd *pd, unsigned state);
void oz_pd_indicate_farewells(struct oz_pd *pd);
int oz_pd_sleep(struct oz_pd *pd);
void oz_pd_stop(struct oz_pd *pd);
void oz_pd_heartbeat(struct oz_pd *pd, u16 apps);
int oz_services_start(struct oz_pd *pd, u16 apps, int resume);
void oz_services_stop(struct oz_pd *pd, u16 apps, int pause);
int oz_prepare_frame(struct oz_pd *pd, int empty);
void oz_send_queued_frames(struct oz_pd *pd, int backlog);
void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn);
int oz_isoc_stream_create(struct oz_pd *pd, u8 ep_num);
int oz_isoc_stream_delete(struct oz_pd *pd, u8 ep_num);
int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, u8 *data, int len);
void oz_handle_app_elt(struct oz_pd *pd, u8 app_id, struct oz_elt *elt);
void oz_apps_init(void);
void oz_apps_term(void);
#endif /* _OZPD_H_ */
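
Finally, a rough sketch of the peripheral-device lifecycle this header exposes (not part of the commit). In the driver the protocol layer owns these calls, keeps the PD on its device list and holds the second reference taken by oz_pd_alloc(); the MAC address and service choice below are made-up example values.

#include <linux/netdevice.h>
#include "ozprotocol.h"
#include "ozpd.h"

/* Illustrative only: create a PD, start and heartbeat the USB service,
 * then drop both references taken by oz_pd_alloc().
 */
static void example_pd_lifecycle(void)
{
	u8 mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	struct oz_pd *pd = oz_pd_alloc(mac);	/* ref_count starts at 2 */

	if (pd == NULL)
		return;
	oz_pd_set_state(pd, OZ_PD_S_CONNECTED);
	/* The apps mask carries one bit per application id. */
	if (oz_services_start(pd, 1 << OZ_APPID_USB, 0) == 0) {
		oz_pd_heartbeat(pd, 1 << OZ_APPID_USB);
		oz_services_stop(pd, 1 << OZ_APPID_USB, 0);
	}
	oz_pd_put(pd);
	oz_pd_put(pd);	/* the second put destroys the PD */
}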