/* The industrial I/O simple minimally locked ring buffer.
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include "ring_sw.h"
#include "trigger.h"

static inline int __iio_allocate_sw_ring_buffer(struct iio_sw_ring_buffer *ring,
						int bytes_per_datum, int length)
{
	if ((length == 0) || (bytes_per_datum == 0))
		return -EINVAL;
	__iio_update_ring_buffer(&ring->buf, bytes_per_datum, length);
	ring->data = kmalloc(length*ring->buf.bpd, GFP_ATOMIC);
	ring->read_p = NULL;
	ring->write_p = NULL;
	ring->last_written_p = NULL;
	ring->half_p = NULL;
	return ring->data ? 0 : -ENOMEM;
}

static inline void __iio_init_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
{
	spin_lock_init(&ring->use_lock);
}

static inline void __iio_free_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
{
	kfree(ring->data);
}

void iio_mark_sw_rb_in_use(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	spin_lock(&ring->use_lock);
	ring->use_count++;
	spin_unlock(&ring->use_lock);
}
EXPORT_SYMBOL(iio_mark_sw_rb_in_use);

void iio_unmark_sw_rb_in_use(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	spin_lock(&ring->use_lock);
	ring->use_count--;
	spin_unlock(&ring->use_lock);
}
EXPORT_SYMBOL(iio_unmark_sw_rb_in_use);
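
/* The use count taken by the two helpers above exists so that a resize
 * (iio_request_update_sw_rb(), later in this file) can never free the
 * data array while a reader still holds pointers into it. A minimal
 * sketch of the expected pairing, assuming a hypothetical caller that
 * already has the struct iio_ring_buffer *r (illustration only):
 *
 *	iio_mark_sw_rb_in_use(r);
 *	... sample read/write pointers and copy data out ...
 *	iio_unmark_sw_rb_in_use(r);
 *
 * While the count is non-zero, iio_request_update_sw_rb() backs off with
 * -EAGAIN instead of reallocating the buffer under the reader.
 */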

/* Ring buffer related functionality */
/* Store to ring is typically called in the bh of a data ready interrupt handler
 * in the device driver */
/* Lock always held if there is a chance this may be called */
/* Only one of these per ring may run concurrently - enforced by drivers */
static int iio_store_to_sw_ring(struct iio_sw_ring_buffer *ring,
				unsigned char *data, s64 timestamp)
{
	int ret = 0;
	int code;
	unsigned char *temp_ptr, *change_test_ptr;

	/* initial store */
	if (unlikely(ring->write_p == NULL)) {
		ring->write_p = ring->data;
		/* Doesn't actually matter if this is out of the set
		 * as long as the read pointer is valid before this
		 * passes it - guaranteed as it is set later in this function.
		 */
		ring->half_p = ring->data - ring->buf.length*ring->buf.bpd/2;
	}
	/* Copy data to wherever the current write pointer says */
	memcpy(ring->write_p, data, ring->buf.bpd);
	barrier();
	/* Update the pointer used to get the most recent value.
	 * Always valid as it points to either the latest or the second
	 * latest value. Before this runs it is null and read attempts
	 * fail with -EAGAIN.
	 */
	ring->last_written_p = ring->write_p;
	barrier();
	/* temp_ptr used to ensure we never have an invalid pointer;
	 * it may be slightly lagging, but is never invalid
	 */
	temp_ptr = ring->write_p + ring->buf.bpd;
	/* End of ring, back to the beginning */
	if (temp_ptr == ring->data + ring->buf.length*ring->buf.bpd)
		temp_ptr = ring->data;
	/* Update the write pointer.
	 * Always valid as long as this is the only function able to write.
	 * Care needed with smp systems to ensure more than one ring fill
	 * is never scheduled.
	 */
	ring->write_p = temp_ptr;

	if (ring->read_p == NULL)
		ring->read_p = ring->data;
	/* Buffer full - move the read pointer and create / escalate
	 * ring event */
	/* Tricky case - if the read pointer moves before we adjust it.
	 * Handle by not pushing if it has moved - may result in occasional
	 * unnecessary buffer full events when it wasn't quite true.
	 */
	else if (ring->write_p == ring->read_p) {
		change_test_ptr = ring->read_p;
		temp_ptr = change_test_ptr + ring->buf.bpd;
		if (temp_ptr
		    == ring->data + ring->buf.length*ring->buf.bpd)
			temp_ptr = ring->data;
		/* We are moving the pointer on by one because the ring is
		 * full. Any change to the read pointer will be this or
		 * greater.
		 */
		if (change_test_ptr == ring->read_p)
			ring->read_p = temp_ptr;

		spin_lock(&ring->buf.shared_ev_pointer.lock);

		ret = iio_push_or_escallate_ring_event(&ring->buf,
						       IIO_EVENT_CODE_RING_100_FULL,
						       timestamp);
		spin_unlock(&ring->buf.shared_ev_pointer.lock);
		if (ret)
			goto error_ret;
	}
	/* investigate if our event barrier has been passed */
	/* There are definite 'issues' with this and chances of
	 * simultaneous read */
	/* Also need to use loop count to ensure this only happens once */
	ring->half_p += ring->buf.bpd;
	if (ring->half_p == ring->data + ring->buf.length*ring->buf.bpd)
		ring->half_p = ring->data;
	if (ring->half_p == ring->read_p) {
		spin_lock(&ring->buf.shared_ev_pointer.lock);
		code = IIO_EVENT_CODE_RING_50_FULL;
		ret = __iio_push_event(&ring->buf.ev_int,
				       code,
				       timestamp,
				       &ring->buf.shared_ev_pointer);
		spin_unlock(&ring->buf.shared_ev_pointer.lock);
	}
error_ret:
	return ret;
}
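
/* A worked example of the pointer dance above, assuming a hypothetical
 * ring with length = 4 and bpd = 2 (so 8 bytes of data, slots at
 * base + 0/2/4/6; illustration only):
 *
 *	store #1: write_p = base + 2, read_p = base, half_p = base - 2
 *	store #2: half_p advances to base and meets read_p
 *		  -> RING_50_FULL event (half full)
 *	store #4: write_p wraps to base and catches read_p
 *		  -> read_p pushed on by one slot (oldest datum dropped)
 *		  -> RING_100_FULL event pushed or escalated
 *
 * Readers only ever see read_p lagging write_p or equal to it, never an
 * out-of-bounds pointer, which is what the barrier()s and the lagging
 * temp_ptr update guarantee.
 */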

int iio_rip_sw_rb(struct iio_ring_buffer *r,
		  size_t count, u8 **data, int *dead_offset)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

	u8 *initial_read_p, *initial_write_p, *current_read_p, *end_read_p;
	int ret, max_copied;
	int bytes_to_rip;

	/* A userspace program has probably made an error if it tries to
	 * read something that is not a whole number of bpds.
	 * Return an error.
	 */
	if (count % ring->buf.bpd) {
		ret = -EINVAL;
		printk(KERN_INFO "Ring buffer read request not whole number of"
		       " samples: Request bytes %zd, Current bpd %d\n",
		       count, ring->buf.bpd);
		goto error_ret;
	}
	/* Limit size to whole of ring buffer */
	bytes_to_rip = min((size_t)(ring->buf.bpd*ring->buf.length), count);

	*data = kmalloc(bytes_to_rip, GFP_KERNEL);
	if (*data == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	/* build local copy */
	initial_read_p = ring->read_p;
	if (unlikely(initial_read_p == NULL)) { /* No data here as yet */
		ret = 0;
		goto error_free_data_cpy;
	}

	initial_write_p = ring->write_p;

	/* Need a consistent pair */
	while ((initial_read_p != ring->read_p)
	       || (initial_write_p != ring->write_p)) {
		initial_read_p = ring->read_p;
		initial_write_p = ring->write_p;
	}
	if (initial_write_p == initial_read_p) {
		/* No new data available. */
		ret = 0;
		goto error_free_data_cpy;
	}

	if (initial_write_p >= initial_read_p + bytes_to_rip) {
		/* write_p is greater than necessary, all is easy */
		max_copied = bytes_to_rip;
		memcpy(*data, initial_read_p, max_copied);
		end_read_p = initial_read_p + max_copied;
	} else if (initial_write_p > initial_read_p) {
		/* not enough data to copy */
		max_copied = initial_write_p - initial_read_p;
		memcpy(*data, initial_read_p, max_copied);
		end_read_p = initial_write_p;
	} else {
		/* going through 'end' of ring buffer */
		max_copied = ring->data
			+ ring->buf.length*ring->buf.bpd - initial_read_p;
		memcpy(*data, initial_read_p, max_copied);
		/* possible we are done if we align precisely with end */
		if (max_copied == bytes_to_rip)
			end_read_p = ring->data;
		else if (initial_write_p
			 > ring->data + bytes_to_rip - max_copied) {
			/* enough data to finish */
			memcpy(*data + max_copied, ring->data,
			       bytes_to_rip - max_copied);
			/* Compute end_read_p before updating max_copied;
			 * doing it afterwards would collapse it to
			 * ring->data. */
			end_read_p = ring->data + (bytes_to_rip - max_copied);
			max_copied = bytes_to_rip;
		} else { /* not enough data */
			memcpy(*data + max_copied, ring->data,
			       initial_write_p - ring->data);
			max_copied += initial_write_p - ring->data;
			end_read_p = initial_write_p;
		}
	}
	/* Now to verify which section was cleanly copied - i.e. how far
	 * the read pointer has been pushed */
	current_read_p = ring->read_p;

	if (initial_read_p <= current_read_p)
		*dead_offset = current_read_p - initial_read_p;
	else
		*dead_offset = ring->buf.length*ring->buf.bpd
			- (initial_read_p - current_read_p);

	/* possible issue if the initial write has been lapped or indeed
	 * the point we were reading to has been passed */
	/* No valid data read.
	 * In this case the read pointer is already correct having been
	 * pushed further than we would look.
	 * (<= so that the zero case frees the copy buffer like the other
	 * empty returns above.)
	 */
	if (max_copied - *dead_offset <= 0) {
		ret = 0;
		goto error_free_data_cpy;
	}

	/* setup the next read position */
	/* Beware, this may fail due to concurrency fun and games.
	 * It is possible that sufficient fill commands have run to push the
	 * read pointer past where we would be after the rip. If this occurs,
	 * leave it be.
	 */
	/* Tricky - deal with loops */

	while (ring->read_p != end_read_p)
		ring->read_p = end_read_p;

	return max_copied - *dead_offset;

error_free_data_cpy:
	kfree(*data);
error_ret:
	return ret;
}
EXPORT_SYMBOL(iio_rip_sw_rb);
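
/* A minimal sketch of how a caller might consume a rip, assuming
 * hypothetical locals and a made-up use_samples() consumer (illustration
 * only, not an API of this file):
 *
 *	u8 *rip_buf;
 *	int dead_offset, valid;
 *
 *	valid = iio_rip_sw_rb(r, count, &rip_buf, &dead_offset);
 *	if (valid > 0) {
 *		use_samples(rip_buf + dead_offset, valid);
 *		kfree(rip_buf);
 *	}
 *
 * The first dead_offset bytes of the copy may have been overwritten by
 * the writer mid-copy, so only the 'valid' bytes after them are good
 * data. On zero or negative returns the copy buffer has already been
 * freed by iio_rip_sw_rb() itself.
 */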

int iio_store_to_sw_rb(struct iio_ring_buffer *r, u8 *data, s64 timestamp)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	return iio_store_to_sw_ring(ring, data, timestamp);
}
EXPORT_SYMBOL(iio_store_to_sw_rb);

static int iio_read_last_from_sw_ring(struct iio_sw_ring_buffer *ring,
				      unsigned char *data)
{
	unsigned char *last_written_p_copy;

	iio_mark_sw_rb_in_use(&ring->buf);
again:
	barrier();
	last_written_p_copy = ring->last_written_p;
	barrier(); /* unnecessary? */
	/* Check there is anything here */
	if (last_written_p_copy == NULL) {
		/* Drop the in-use count taken above before bailing out */
		iio_unmark_sw_rb_in_use(&ring->buf);
		return -EAGAIN;
	}
	memcpy(data, last_written_p_copy, ring->buf.bpd);

	if (unlikely(ring->last_written_p != last_written_p_copy))
		goto again;

	iio_unmark_sw_rb_in_use(&ring->buf);
	return 0;
}

int iio_read_last_from_sw_rb(struct iio_ring_buffer *r,
			     unsigned char *data)
{
	return iio_read_last_from_sw_ring(iio_to_sw_ring(r), data);
}
EXPORT_SYMBOL(iio_read_last_from_sw_rb);

int iio_request_update_sw_rb(struct iio_ring_buffer *r)
{
	int ret = 0;
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

	spin_lock(&ring->use_lock);
	if (!ring->update_needed)
		goto error_ret;
	if (ring->use_count) {
		ret = -EAGAIN;
		goto error_ret;
	}
	__iio_free_sw_ring_buffer(ring);
	ret = __iio_allocate_sw_ring_buffer(ring, ring->buf.bpd,
					    ring->buf.length);
error_ret:
	spin_unlock(&ring->use_lock);
	return ret;
}
EXPORT_SYMBOL(iio_request_update_sw_rb);

int iio_get_bpd_sw_rb(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	return ring->buf.bpd;
}
EXPORT_SYMBOL(iio_get_bpd_sw_rb);

int iio_set_bpd_sw_rb(struct iio_ring_buffer *r, size_t bpd)
{
	if (r->bpd != bpd) {
		r->bpd = bpd;
		if (r->access.mark_param_change)
			r->access.mark_param_change(r);
	}
	return 0;
}
EXPORT_SYMBOL(iio_set_bpd_sw_rb);

int iio_get_length_sw_rb(struct iio_ring_buffer *r)
{
	return r->length;
}
EXPORT_SYMBOL(iio_get_length_sw_rb);

int iio_set_length_sw_rb(struct iio_ring_buffer *r, int length)
{
	if (r->length != length) {
		r->length = length;
		if (r->access.mark_param_change)
			r->access.mark_param_change(r);
	}
	return 0;
}
EXPORT_SYMBOL(iio_set_length_sw_rb);

int iio_mark_update_needed_sw_rb(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	ring->update_needed = true;
	return 0;
}
EXPORT_SYMBOL(iio_mark_update_needed_sw_rb);

static void iio_sw_rb_release(struct device *dev)
{
	struct iio_ring_buffer *r = to_iio_ring_buffer(dev);
	kfree(iio_to_sw_ring(r));
}

static IIO_RING_ENABLE_ATTR;
static IIO_RING_BPS_ATTR;
static IIO_RING_LENGTH_ATTR;

/* Standard set of ring buffer attributes */
static struct attribute *iio_ring_attributes[] = {
	&dev_attr_length.attr,
	&dev_attr_bps.attr,
	&dev_attr_ring_enable.attr,
	NULL,
};

static struct attribute_group iio_ring_attribute_group = {
	.attrs = iio_ring_attributes,
};

static const struct attribute_group *iio_ring_attribute_groups[] = {
	&iio_ring_attribute_group,
	NULL
};
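
/* A rough sketch of the resize flow the length/bps attributes above
 * drive, assuming mark_param_change is wired to
 * iio_mark_update_needed_sw_rb and request_update to
 * iio_request_update_sw_rb, as the helpers in this file suggest
 * (illustration only):
 *
 *	iio_set_length_sw_rb(r, 128);
 *		-> mark_param_change() sets ring->update_needed
 *	iio_request_update_sw_rb(r);
 *		-> frees and reallocates the data array, or returns
 *		   -EAGAIN if a reader currently holds the use count
 *
 * This is why readers must take the in-use count: it is the only thing
 * preventing a concurrent resize from freeing the array under them.
 */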

static struct device_type iio_sw_ring_type = {
	.release = iio_sw_rb_release,
	.groups = iio_ring_attribute_groups,
};

struct iio_ring_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev)
{
	struct iio_ring_buffer *buf;
	struct iio_sw_ring_buffer *ring;

	ring = kzalloc(sizeof *ring, GFP_KERNEL);
	if (!ring)
		return NULL;
	buf = &ring->buf;
	iio_ring_buffer_init(buf, indio_dev);
	__iio_init_sw_ring_buffer(ring);
	buf->dev.type = &iio_sw_ring_type;
	device_initialize(&buf->dev);
	buf->dev.parent = &indio_dev->dev;
	buf->dev.bus = &iio_bus_type;
	dev_set_drvdata(&buf->dev, (void *)buf);

	return buf;
}
EXPORT_SYMBOL(iio_sw_rb_allocate);

void iio_sw_rb_free(struct iio_ring_buffer *r)
{
	if (r)
		iio_put_ring_buffer(r);
}
EXPORT_SYMBOL(iio_sw_rb_free);

int iio_sw_ring_preenable(struct iio_dev *indio_dev)
{
	size_t size;
	dev_dbg(&indio_dev->dev, "%s\n", __func__);
	/* Check if there are any scan elements enabled; if not, fail */
	if (!(indio_dev->scan_count || indio_dev->scan_timestamp))
		return -EINVAL;
	if (indio_dev->scan_timestamp) {
		if (indio_dev->scan_count)
			/* Timestamp (aligned to s64) and data */
			size = (((indio_dev->scan_count * indio_dev->ring->bpe)
				 + sizeof(s64) - 1)
				& ~(sizeof(s64) - 1))
				+ sizeof(s64);
		else /* Timestamp only */
			size = sizeof(s64);
	} else {
		/* Data only */
		size = indio_dev->scan_count * indio_dev->ring->bpe;
	}
	indio_dev->ring->access.set_bpd(indio_dev->ring, size);

	return 0;
}
EXPORT_SYMBOL(iio_sw_ring_preenable);

void iio_sw_trigger_bh_to_ring(struct work_struct *work_s)
{
	struct iio_sw_ring_helper_state *st
		= container_of(work_s, struct iio_sw_ring_helper_state,
			       work_trigger_to_ring);
	int len = 0;
	size_t datasize = st->indio_dev
		->ring->access.get_bpd(st->indio_dev->ring);
	char *data = kmalloc(datasize, GFP_KERNEL);

	if (data == NULL) {
		dev_err(st->indio_dev->dev.parent,
			"memory alloc failed in ring bh\n");
		return;
	}

	if (st->indio_dev->scan_count)
		len = st->get_ring_element(st, data);

	/* Guaranteed to be aligned on an 8 byte boundary */
	if (st->indio_dev->scan_timestamp)
		*(s64 *)(((uintptr_t)data + len
			  + sizeof(s64) - 1) & ~(sizeof(s64) - 1))
			= st->last_timestamp;
	st->indio_dev->ring->access.store_to(st->indio_dev->ring,
					     (u8 *)data,
					     st->last_timestamp);

	iio_trigger_notify_done(st->indio_dev->trig);
	kfree(data);
}
EXPORT_SYMBOL(iio_sw_trigger_bh_to_ring);

void iio_sw_poll_func_th(struct iio_dev *indio_dev, s64 time)
{
	struct iio_sw_ring_helper_state *h
		= iio_dev_get_devdata(indio_dev);
	h->last_timestamp = time;
	schedule_work(&h->work_trigger_to_ring);
}
EXPORT_SYMBOL(iio_sw_poll_func_th);

MODULE_DESCRIPTION("Industrial I/O software ring buffer");
MODULE_LICENSE("GPL");
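
/* A rough sketch of how a device driver is expected to wire up the
 * helpers above, assuming a driver state embedding struct
 * iio_sw_ring_helper_state as 'h' and a made-up mydrv_get_ring_element()
 * (illustration only):
 *
 *	h->get_ring_element = &mydrv_get_ring_element;
 *	INIT_WORK(&h->work_trigger_to_ring, iio_sw_trigger_bh_to_ring);
 *	h->indio_dev->ring = iio_sw_rb_allocate(h->indio_dev);
 *
 * The trigger's poll function calls iio_sw_poll_func_th(), which stamps
 * the timestamp and schedules work_trigger_to_ring; the work function
 * then grabs one scan via get_ring_element() and stores it to the ring
 * with the timestamp appended.
 */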