/*
 * drv.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * DSP/BIOS Bridge resource allocation module.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
#include <linux/types.h>

/* ----------------------------------- Host OS */
#include <dspbridge/host_os.h>

/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/* ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>

/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/cfg.h>
#include <dspbridge/list.h>

/* ----------------------------------- This */
#include <dspbridge/drv.h>
#include <dspbridge/dev.h>

#include <dspbridge/node.h>
#include <dspbridge/proc.h>
#include <dspbridge/strm.h>
#include <dspbridge/nodepriv.h>
#include <dspbridge/dspchnl.h>
#include <dspbridge/resourcecleanup.h>

/* ----------------------------------- Defines, Data Structures, Typedefs */
struct drv_object {
	struct lst_list *dev_list;
	struct lst_list *dev_node_string;
};

/*
 * This is the Device Extension. Named with the prefix
 * DRV_ since it lives in this module.
 */
struct drv_ext {
	struct list_head link;
	char sz_string[MAXREGPATHLENGTH];
};

/* ----------------------------------- Globals */
static s32 refs;
static bool ext_phys_mem_pool_enabled;
struct ext_phys_mem_pool {
	u32 phys_mem_base;
	u32 phys_mem_size;
	u32 virt_mem_base;
	u32 next_phys_alloc_ptr;
};
static struct ext_phys_mem_pool ext_mem_pool;

/* ----------------------------------- Function Prototypes */
static int request_bridge_resources(struct cfg_hostres *res);

/* GPP PROCESS CLEANUP CODE */

static int drv_proc_free_node_res(int id, void *p, void *data);

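/*
 * Per-process resource bookkeeping: each GPP process that opens the bridge
 * carries a struct process_context (see the dspbridge resource-cleanup
 * headers).  The insert helpers below record every allocated node and
 * stream in the context's IDR trees (ctxt->node_id, ctxt->stream_id), and
 * DMM mappings/reservations are kept on ctxt->dmm_map_list and
 * ctxt->dmm_rsv_list.  When the process releases the driver, the
 * drv_remove_all_*() helpers walk these structures and free whatever the
 * process left behind.
 *
 * ID allocation follows the legacy two-step IDR pattern used before
 * idr_alloc() existed: call idr_get_new(); on -EAGAIN, preload memory with
 * idr_pre_get() and retry once.  A sketch of the calling convention
 * (hypothetical caller; hnode and pr_ctxt stand for the caller's node
 * handle and process context):
 *
 *	struct node_res_object *node_res;
 *
 *	status = drv_insert_node_res_element(hnode, &node_res, pr_ctxt);
 *	if (!status)
 *		drv_proc_node_update_status(node_res, true);
 */
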
/* Allocate and add a node resource element
 * This function is called from node_allocate().
 */
int drv_insert_node_res_element(void *hnode, void *node_resource,
				void *process_ctxt)
{
	struct node_res_object **node_res_obj =
	    (struct node_res_object **)node_resource;
	struct process_context *ctxt = (struct process_context *)process_ctxt;
	int status = 0;
	int retval;

	*node_res_obj = kzalloc(sizeof(struct node_res_object), GFP_KERNEL);
	if (!*node_res_obj) {
		status = -ENOMEM;
		goto func_end;
	}

	(*node_res_obj)->hnode = hnode;
	retval = idr_get_new(ctxt->node_id, *node_res_obj,
			     &(*node_res_obj)->id);
	if (retval == -EAGAIN) {
		if (!idr_pre_get(ctxt->node_id, GFP_KERNEL)) {
			pr_err("%s: OUT OF MEMORY\n", __func__);
			status = -ENOMEM;
			goto func_end;
		}

		retval = idr_get_new(ctxt->node_id, *node_res_obj,
				     &(*node_res_obj)->id);
	}
	if (retval) {
		pr_err("%s: FAILED, IDR is FULL\n", __func__);
		status = -EFAULT;
	}
func_end:
	if (status)
		kfree(*node_res_obj);

	return status;
}

/* Release all Node resources and its context
 * Actual Node De-Allocation */
static int drv_proc_free_node_res(int id, void *p, void *data)
{
	struct process_context *ctxt = data;
	int status;
	struct node_res_object *node_res_obj = p;
	u32 node_state;

	if (node_res_obj->node_allocated) {
		node_state = node_get_state(node_res_obj->hnode);
		if (node_state <= NODE_DELETING) {
			if ((node_state == NODE_RUNNING) ||
			    (node_state == NODE_PAUSED) ||
			    (node_state == NODE_TERMINATING))
				node_terminate
				    (node_res_obj->hnode, &status);

			node_delete(node_res_obj, ctxt);
		}
	}

	return 0;
}

/* Release all Mapped and Reserved DMM resources */
int drv_remove_all_dmm_res_elements(void *process_ctxt)
{
	struct process_context *ctxt = (struct process_context *)process_ctxt;
	int status = 0;
	struct dmm_map_object *temp_map, *map_obj;
	struct dmm_rsv_object *temp_rsv, *rsv_obj;

	/* Free DMM mapped memory resources */
	list_for_each_entry_safe(map_obj, temp_map, &ctxt->dmm_map_list, link) {
		status = proc_un_map(ctxt->hprocessor,
				     (void *)map_obj->dsp_addr, ctxt);
		if (status)
			pr_err("%s: proc_un_map failed!"
			       " status = 0x%x\n", __func__, status);
	}

	/* Free DMM reserved memory resources */
	list_for_each_entry_safe(rsv_obj, temp_rsv, &ctxt->dmm_rsv_list, link) {
		status = proc_un_reserve_memory(ctxt->hprocessor, (void *)
						rsv_obj->dsp_reserved_addr,
						ctxt);
		if (status)
			pr_err("%s: proc_un_reserve_memory failed!"
			       " status = 0x%x\n", __func__, status);
	}
	return status;
}

/* Update Node allocation status */
void drv_proc_node_update_status(void *node_resource, s32 status)
{
	struct node_res_object *node_res_obj =
	    (struct node_res_object *)node_resource;
	DBC_ASSERT(node_resource != NULL);
	node_res_obj->node_allocated = status;
}

/* Update Node Heap status */
void drv_proc_node_update_heap_status(void *node_resource, s32 status)
{
	struct node_res_object *node_res_obj =
	    (struct node_res_object *)node_resource;
	DBC_ASSERT(node_resource != NULL);
	node_res_obj->heap_allocated = status;
}

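/*
 * Bulk teardown below uses idr_for_each(): the callback
 * (drv_proc_free_node_res / drv_proc_free_strm_res) runs once for every ID
 * still present in the per-process IDR and releases the underlying node or
 * stream, after which idr_destroy() frees the IDR's internal memory.
 */
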
/* Release all Node resources and its context
 * This is called from bridge_release().
 */
int drv_remove_all_node_res_elements(void *process_ctxt)
{
	struct process_context *ctxt = process_ctxt;

	idr_for_each(ctxt->node_id, drv_proc_free_node_res, ctxt);
	idr_destroy(ctxt->node_id);

	return 0;
}

/* Allocate the STRM resource element
 * This is called after the actual resource is allocated
 */
int drv_proc_insert_strm_res_element(void *stream_obj,
				     void *strm_res, void *process_ctxt)
{
	struct strm_res_object **pstrm_res =
	    (struct strm_res_object **)strm_res;
	struct process_context *ctxt = (struct process_context *)process_ctxt;
	int status = 0;
	int retval;

	*pstrm_res = kzalloc(sizeof(struct strm_res_object), GFP_KERNEL);
	if (*pstrm_res == NULL) {
		status = -EFAULT;
		goto func_end;
	}

	(*pstrm_res)->hstream = stream_obj;
	retval = idr_get_new(ctxt->stream_id, *pstrm_res,
			     &(*pstrm_res)->id);
	if (retval == -EAGAIN) {
		if (!idr_pre_get(ctxt->stream_id, GFP_KERNEL)) {
			pr_err("%s: OUT OF MEMORY\n", __func__);
			status = -ENOMEM;
			goto func_end;
		}

		retval = idr_get_new(ctxt->stream_id, *pstrm_res,
				     &(*pstrm_res)->id);
	}
	if (retval) {
		pr_err("%s: FAILED, IDR is FULL\n", __func__);
		status = -EPERM;
	}

func_end:
	return status;
}

static int drv_proc_free_strm_res(int id, void *p, void *process_ctxt)
{
	struct process_context *ctxt = process_ctxt;
	struct strm_res_object *strm_res = p;
	struct stream_info strm_info;
	struct dsp_streaminfo user;
	u8 **ap_buffer = NULL;
	u8 *buf_ptr;
	u32 ul_bytes;
	u32 dw_arg;
	s32 ul_buf_size;

	if (strm_res->num_bufs) {
		ap_buffer = kmalloc((strm_res->num_bufs *
				     sizeof(u8 *)), GFP_KERNEL);
		if (ap_buffer) {
			strm_free_buffer(strm_res,
					 ap_buffer,
					 strm_res->num_bufs,
					 ctxt);
			kfree(ap_buffer);
		}
	}
	strm_info.user_strm = &user;
	user.number_bufs_in_stream = 0;
	strm_get_info(strm_res->hstream, &strm_info, sizeof(strm_info));
	while (user.number_bufs_in_stream--)
		strm_reclaim(strm_res->hstream, &buf_ptr, &ul_bytes,
			     (u32 *) &ul_buf_size, &dw_arg);
	strm_close(strm_res, ctxt);
	return 0;
}

/* Release all Stream resources and its context
 * This is called from bridge_release().
 */
int drv_remove_all_strm_res_elements(void *process_ctxt)
{
	struct process_context *ctxt = process_ctxt;

	idr_for_each(ctxt->stream_id, drv_proc_free_strm_res, ctxt);
	idr_destroy(ctxt->stream_id);

	return 0;
}

/* Update the stream resource element */
int drv_proc_update_strm_res(u32 num_bufs, void *strm_resources)
{
	int status = 0;
	struct strm_res_object **strm_res =
	    (struct strm_res_object **)strm_resources;

	(*strm_res)->num_bufs = num_bufs;
	return status;
}

/* GPP PROCESS CLEANUP CODE END */

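/*
 * DRV object lifecycle: a single struct drv_object is created at driver
 * load and published through the registry (cfg_set_object(REG_DRV_OBJECT));
 * every later lookup in this file retrieves it with cfg_get_object(), and
 * drv_destroy()/drv_exit() tear it down on unload.  A rough bring-up
 * sequence, illustrative only and not a verbatim copy of the probe code
 * (driver_name stands for the platform device name passed in by the probe
 * path):
 *
 *	struct drv_object *drv_obj;
 *	u32 dev_node;
 *
 *	drv_init();
 *	if (!drv_create(&drv_obj) &&
 *	    !drv_request_resources((u32) driver_name, &dev_node)) {
 *		... create the device object, map resources ...
 *	}
 */
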
/*
 *  ======== drv_create ========
 *  Purpose:
 *      DRV Object gets created only once during Driver Loading.
 */
int drv_create(struct drv_object **drv_obj)
{
	int status = 0;
	struct drv_object *pdrv_object = NULL;

	DBC_REQUIRE(drv_obj != NULL);
	DBC_REQUIRE(refs > 0);

	pdrv_object = kzalloc(sizeof(struct drv_object), GFP_KERNEL);
	if (pdrv_object) {
		/* Create and Initialize List of device objects */
		pdrv_object->dev_list = kzalloc(sizeof(struct lst_list),
						GFP_KERNEL);
		if (pdrv_object->dev_list) {
			/* Create and Initialize List of device Extension */
			pdrv_object->dev_node_string =
			    kzalloc(sizeof(struct lst_list), GFP_KERNEL);
			if (!(pdrv_object->dev_node_string)) {
				status = -EPERM;
			} else {
				INIT_LIST_HEAD(&pdrv_object->
					       dev_node_string->head);
				INIT_LIST_HEAD(&pdrv_object->dev_list->head);
			}
		} else {
			status = -ENOMEM;
		}
	} else {
		status = -ENOMEM;
	}
	/* Store the DRV Object in the Registry */
	if (!status)
		status = cfg_set_object((u32) pdrv_object, REG_DRV_OBJECT);
	if (!status) {
		*drv_obj = pdrv_object;
	} else if (pdrv_object) {
		kfree(pdrv_object->dev_list);
		kfree(pdrv_object->dev_node_string);
		/* Free the DRV Object */
		kfree(pdrv_object);
	}

	DBC_ENSURE(status || pdrv_object);
	return status;
}

/*
 *  ======== drv_exit ========
 *  Purpose:
 *      Discontinue usage of the DRV module.
 */
void drv_exit(void)
{
	DBC_REQUIRE(refs > 0);

	refs--;

	DBC_ENSURE(refs >= 0);
}

/*
 *  ======== drv_destroy ========
 *  Purpose:
 *      Invoked during bridge de-initialization.
 */
int drv_destroy(struct drv_object *driver_obj)
{
	int status = 0;
	struct drv_object *pdrv_object = (struct drv_object *)driver_obj;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(pdrv_object);

	/*
	 * Delete the lists if they exist. Should not come here, as
	 * drv_remove_dev_object and the last drv_request_resources
	 * remove the lists once they are empty.
	 */
	kfree(pdrv_object->dev_list);
	kfree(pdrv_object->dev_node_string);
	kfree(pdrv_object);
	/* Update the DRV Object in Registry to be 0 */
	(void)cfg_set_object(0, REG_DRV_OBJECT);

	return status;
}

/*
 *  ======== drv_get_dev_object ========
 *  Purpose:
 *      Given an index, returns a handle to DevObject from the list.
 */
int drv_get_dev_object(u32 index, struct drv_object *hdrv_obj,
		       struct dev_object **device_obj)
{
	int status = 0;
#ifdef CONFIG_TIDSPBRIDGE_DEBUG
	/* used only for Assertions and debug messages */
	struct drv_object *pdrv_obj = (struct drv_object *)hdrv_obj;
#endif
	struct dev_object *dev_obj;
	u32 i;
	DBC_REQUIRE(pdrv_obj);
	DBC_REQUIRE(device_obj != NULL);
	DBC_REQUIRE(index >= 0);
	DBC_REQUIRE(refs > 0);
	DBC_ASSERT(!(LST_IS_EMPTY(pdrv_obj->dev_list)));

	dev_obj = (struct dev_object *)drv_get_first_dev_object();
	for (i = 0; i < index; i++) {
		dev_obj =
		    (struct dev_object *)drv_get_next_dev_object((u32) dev_obj);
	}
	if (dev_obj) {
		*device_obj = (struct dev_object *)dev_obj;
	} else {
		*device_obj = NULL;
		status = -EPERM;
	}

	return status;
}

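/*
 * The accessors below expose the DRV lists as a first/next iteration API;
 * handles are passed around as u32 values, and the casts assume the list
 * link is the first member of each element.  A typical walk over all
 * device objects therefore looks like this (sketch, error handling
 * omitted):
 *
 *	u32 hdev;
 *
 *	for (hdev = drv_get_first_dev_object(); hdev;
 *	     hdev = drv_get_next_dev_object(hdev)) {
 *		struct dev_object *dev = (struct dev_object *)hdev;
 *		... use dev ...
 *	}
 */
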
/*
 *  ======== drv_get_first_dev_object ========
 *  Purpose:
 *      Retrieve the first Device Object handle from an internal linked list
 *      of DEV_OBJECTs maintained by DRV.
 */
u32 drv_get_first_dev_object(void)
{
	u32 dw_dev_object = 0;
	struct drv_object *pdrv_obj;

	if (!cfg_get_object((u32 *) &pdrv_obj, REG_DRV_OBJECT)) {
		if ((pdrv_obj->dev_list != NULL) &&
		    !LST_IS_EMPTY(pdrv_obj->dev_list))
			dw_dev_object = (u32) lst_first(pdrv_obj->dev_list);
	}

	return dw_dev_object;
}

/*
 *  ======== drv_get_first_dev_extension ========
 *  Purpose:
 *      Retrieve the first Device Extension from an internal linked list
 *      of pointers to dev_node strings maintained by DRV.
 */
u32 drv_get_first_dev_extension(void)
{
	u32 dw_dev_extension = 0;
	struct drv_object *pdrv_obj;

	if (!cfg_get_object((u32 *) &pdrv_obj, REG_DRV_OBJECT)) {

		if ((pdrv_obj->dev_node_string != NULL) &&
		    !LST_IS_EMPTY(pdrv_obj->dev_node_string)) {
			dw_dev_extension =
			    (u32) lst_first(pdrv_obj->dev_node_string);
		}
	}

	return dw_dev_extension;
}

/*
 *  ======== drv_get_next_dev_object ========
 *  Purpose:
 *      Retrieve the next Device Object handle from an internal linked list
 *      of DEV_OBJECTs maintained by DRV, after having previously called
 *      drv_get_first_dev_object() and zero or more
 *      drv_get_next_dev_object().
 */
u32 drv_get_next_dev_object(u32 hdev_obj)
{
	u32 dw_next_dev_object = 0;
	struct drv_object *pdrv_obj;

	DBC_REQUIRE(hdev_obj != 0);

	if (!cfg_get_object((u32 *) &pdrv_obj, REG_DRV_OBJECT)) {

		if ((pdrv_obj->dev_list != NULL) &&
		    !LST_IS_EMPTY(pdrv_obj->dev_list)) {
			dw_next_dev_object = (u32) lst_next(pdrv_obj->dev_list,
							    (struct list_head *)
							    hdev_obj);
		}
	}
	return dw_next_dev_object;
}

/*
 *  ======== drv_get_next_dev_extension ========
 *  Purpose:
 *      Retrieve the next Device Extension from an internal linked list of
 *      pointers to dev_node strings maintained by DRV, after having
 *      previously called drv_get_first_dev_extension() and zero or more
 *      drv_get_next_dev_extension().
 */
u32 drv_get_next_dev_extension(u32 dev_extension)
{
	u32 dw_dev_extension = 0;
	struct drv_object *pdrv_obj;

	DBC_REQUIRE(dev_extension != 0);

	if (!cfg_get_object((u32 *) &pdrv_obj, REG_DRV_OBJECT)) {
		if ((pdrv_obj->dev_node_string != NULL) &&
		    !LST_IS_EMPTY(pdrv_obj->dev_node_string)) {
			dw_dev_extension =
			    (u32) lst_next(pdrv_obj->dev_node_string,
					   (struct list_head *)dev_extension);
		}
	}

	return dw_dev_extension;
}

/*
 *  ======== drv_init ========
 *  Purpose:
 *      Initialize DRV module private state.
 */
int drv_init(void)
{
	s32 ret = 1;		/* function return value */

	DBC_REQUIRE(refs >= 0);

	if (ret)
		refs++;

	DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));

	return ret;
}

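/*
 * drv_init()/drv_exit() only maintain the module reference count; the
 * DBC_REQUIRE(refs > 0) contracts in the functions below assume drv_init()
 * has been called at least once before any other DRV call.
 */
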
/*
 *  ======== drv_insert_dev_object ========
 *  Purpose:
 *      Insert a DevObject into the list of the Manager object.
 */
int drv_insert_dev_object(struct drv_object *driver_obj,
			  struct dev_object *hdev_obj)
{
	struct drv_object *pdrv_object = (struct drv_object *)driver_obj;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(hdev_obj != NULL);
	DBC_REQUIRE(pdrv_object);
	DBC_ASSERT(pdrv_object->dev_list);

	lst_put_tail(pdrv_object->dev_list, (struct list_head *)hdev_obj);

	DBC_ENSURE(!LST_IS_EMPTY(pdrv_object->dev_list));

	return 0;
}

/*
 *  ======== drv_remove_dev_object ========
 *  Purpose:
 *      Search for and remove a DeviceObject from the given list of DRV
 *      objects.
 */
int drv_remove_dev_object(struct drv_object *driver_obj,
			  struct dev_object *hdev_obj)
{
	int status = -EPERM;
	struct drv_object *pdrv_object = (struct drv_object *)driver_obj;
	struct list_head *cur_elem;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(pdrv_object);
	DBC_REQUIRE(hdev_obj != NULL);

	DBC_REQUIRE(pdrv_object->dev_list != NULL);
	DBC_REQUIRE(!LST_IS_EMPTY(pdrv_object->dev_list));

	/* Search list for p_proc_object: */
	for (cur_elem = lst_first(pdrv_object->dev_list); cur_elem != NULL;
	     cur_elem = lst_next(pdrv_object->dev_list, cur_elem)) {
		/* If found, remove it. */
		if ((struct dev_object *)cur_elem == hdev_obj) {
			lst_remove_elem(pdrv_object->dev_list, cur_elem);
			status = 0;
			break;
		}
	}
	/* Remove list if empty. */
	if (LST_IS_EMPTY(pdrv_object->dev_list)) {
		kfree(pdrv_object->dev_list);
		pdrv_object->dev_list = NULL;
	}
	DBC_ENSURE((pdrv_object->dev_list == NULL) ||
		   !LST_IS_EMPTY(pdrv_object->dev_list));

	return status;
}

/*
 *  ======== drv_request_resources ========
 *  Purpose:
 *      Requests resources from the OS.
 */
int drv_request_resources(u32 dw_context, u32 *dev_node_strg)
{
	int status = 0;
	struct drv_object *pdrv_object;
	struct drv_ext *pszdev_node;

	DBC_REQUIRE(dw_context != 0);
	DBC_REQUIRE(dev_node_strg != NULL);

	/*
	 * Allocate memory to hold the string. This will live until
	 * it is freed in drv_release_resources(). Update the driver
	 * object list.
	 */

	status = cfg_get_object((u32 *) &pdrv_object, REG_DRV_OBJECT);
	if (!status) {
		pszdev_node = kzalloc(sizeof(struct drv_ext), GFP_KERNEL);
		if (pszdev_node) {
			lst_init_elem(&pszdev_node->link);
			strncpy(pszdev_node->sz_string,
				(char *)dw_context, MAXREGPATHLENGTH - 1);
			pszdev_node->sz_string[MAXREGPATHLENGTH - 1] = '\0';
			/* Update the Driver Object List */
			*dev_node_strg = (u32) pszdev_node->sz_string;
			lst_put_tail(pdrv_object->dev_node_string,
				     (struct list_head *)pszdev_node);
		} else {
			status = -ENOMEM;
			*dev_node_strg = 0;
		}
	} else {
		dev_dbg(bridge, "%s: Failed to get Driver Object from Registry",
			__func__);
		*dev_node_strg = 0;
	}

	DBC_ENSURE((!status && dev_node_strg != NULL &&
		    !LST_IS_EMPTY(pdrv_object->dev_node_string)) ||
		   (status && *dev_node_strg == 0));

	return status;
}

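/*
 * Note on the dev-node bookkeeping: drv_request_resources() copies the
 * device-node string into a freshly allocated struct drv_ext, queues the
 * element on dev_node_string and hands the caller the address of the
 * embedded sz_string buffer (not of the drv_ext element itself).
 * drv_release_resources() below walks the same list to find and free the
 * matching element.
 */
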
/*
 *  ======== drv_release_resources ========
 *  Purpose:
 *      Releases resources from the OS.
 */
int drv_release_resources(u32 dw_context, struct drv_object *hdrv_obj)
{
	int status = 0;
	struct drv_object *pdrv_object = (struct drv_object *)hdrv_obj;
	struct drv_ext *pszdev_node;

	/*
	 * Irrespective of the status, go ahead and clean up.
	 * The following will overwrite the status.
	 */
	for (pszdev_node = (struct drv_ext *)drv_get_first_dev_extension();
	     pszdev_node != NULL; pszdev_node = (struct drv_ext *)
	     drv_get_next_dev_extension((u32) pszdev_node)) {
		if (!pdrv_object->dev_node_string) {
			/* When could this happen? */
			continue;
		}
		if ((u32) pszdev_node == dw_context) {
			/* Found it */
			/* Delete from the Driver object list */
			lst_remove_elem(pdrv_object->dev_node_string,
					(struct list_head *)pszdev_node);
			kfree((void *)pszdev_node);
			break;
		}
		/* Delete the List if it is empty */
		if (LST_IS_EMPTY(pdrv_object->dev_node_string)) {
			kfree(pdrv_object->dev_node_string);
			pdrv_object->dev_node_string = NULL;
		}
	}
	return status;
}

/*
 *  ======== request_bridge_resources ========
 *  Purpose:
 *      Reserves shared memory for bridge.
 */
static int request_bridge_resources(struct cfg_hostres *res)
{
	struct cfg_hostres *host_res = res;

	/* num_mem_windows must not be more than CFG_MAXMEMREGISTERS */
	host_res->num_mem_windows = 2;

	/* First window is for DSP internal memory */
	host_res->dw_sys_ctrl_base = ioremap(OMAP_SYSC_BASE, OMAP_SYSC_SIZE);
	dev_dbg(bridge, "dw_mem_base[0] 0x%x\n", host_res->dw_mem_base[0]);
	dev_dbg(bridge, "dw_mem_base[3] 0x%x\n", host_res->dw_mem_base[3]);
	dev_dbg(bridge, "dw_dmmu_base %p\n", host_res->dw_dmmu_base);

	/* for 24xx the base port does not map the memory for DSP
	 * internal memory; TODO: do an ioremap here */
	/* Second window is for DSP external memory shared with MPU */

	/* These are hard-coded values */
	host_res->birq_registers = 0;
	host_res->birq_attrib = 0;
	host_res->dw_offset_for_monitor = 0;
	host_res->dw_chnl_offset = 0;
	/* CHNL_MAXCHANNELS */
	host_res->dw_num_chnls = CHNL_MAXCHANNELS;
	host_res->dw_chnl_buf_size = 0x400;

	return 0;
}

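/*
 * drv_request_bridge_res_dsp() below fills in the remaining host resources:
 * the DSP memory windows and the PRCM/MMU register ranges are ioremap()ed
 * into dw_mem_base[2..4], dw_per_base, dw_per_pm_base, dw_core_pm_base and
 * dw_dmmu_base, and the MPU/DSP shared-memory segment is carved out with
 * mem_alloc_phys_mem(): dw_mem_base[1] receives its virtual address,
 * dw_mem_phys[1] the physical/DMA address and dw_mem_length[1] its size
 * (shm_size, taken from the bridge driver data).
 */
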
/*
 *  ======== drv_request_bridge_res_dsp ========
 *  Purpose:
 *      Reserves shared memory for bridge.
 */
int drv_request_bridge_res_dsp(void **phost_resources)
{
	int status = 0;
	struct cfg_hostres *host_res;
	u32 dw_buff_size;
	u32 dma_addr;
	u32 shm_size;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	dw_buff_size = sizeof(struct cfg_hostres);

	host_res = kzalloc(dw_buff_size, GFP_KERNEL);

	if (host_res != NULL) {
		request_bridge_resources(host_res);
		/* num_mem_windows must not be more than CFG_MAXMEMREGISTERS */
		host_res->num_mem_windows = 4;

		host_res->dw_mem_base[0] = 0;
		host_res->dw_mem_base[2] = (u32) ioremap(OMAP_DSP_MEM1_BASE,
							 OMAP_DSP_MEM1_SIZE);
		host_res->dw_mem_base[3] = (u32) ioremap(OMAP_DSP_MEM2_BASE,
							 OMAP_DSP_MEM2_SIZE);
		host_res->dw_mem_base[4] = (u32) ioremap(OMAP_DSP_MEM3_BASE,
							 OMAP_DSP_MEM3_SIZE);
		host_res->dw_per_base = ioremap(OMAP_PER_CM_BASE,
						OMAP_PER_CM_SIZE);
		host_res->dw_per_pm_base = (u32) ioremap(OMAP_PER_PRM_BASE,
							 OMAP_PER_PRM_SIZE);
		host_res->dw_core_pm_base = (u32) ioremap(OMAP_CORE_PRM_BASE,
							  OMAP_CORE_PRM_SIZE);
		host_res->dw_dmmu_base = ioremap(OMAP_DMMU_BASE,
						 OMAP_DMMU_SIZE);

		dev_dbg(bridge, "dw_mem_base[0] 0x%x\n",
			host_res->dw_mem_base[0]);
		dev_dbg(bridge, "dw_mem_base[1] 0x%x\n",
			host_res->dw_mem_base[1]);
		dev_dbg(bridge, "dw_mem_base[2] 0x%x\n",
			host_res->dw_mem_base[2]);
		dev_dbg(bridge, "dw_mem_base[3] 0x%x\n",
			host_res->dw_mem_base[3]);
		dev_dbg(bridge, "dw_mem_base[4] 0x%x\n",
			host_res->dw_mem_base[4]);
		dev_dbg(bridge, "dw_dmmu_base %p\n", host_res->dw_dmmu_base);

		shm_size = drv_datap->shm_size;
		if (shm_size >= 0x10000) {
			/* Allocate Physically contiguous,
			 * non-cacheable memory */
			host_res->dw_mem_base[1] =
			    (u32) mem_alloc_phys_mem(shm_size, 0x100000,
						     &dma_addr);
			if (host_res->dw_mem_base[1] == 0) {
				status = -ENOMEM;
				pr_err("shm reservation Failed\n");
			} else {
				host_res->dw_mem_length[1] = shm_size;
				host_res->dw_mem_phys[1] = dma_addr;

				dev_dbg(bridge, "%s: Bridge shm address 0x%x "
					"dma_addr %x size %x\n", __func__,
					host_res->dw_mem_base[1],
					dma_addr, shm_size);
			}
		}
		if (!status) {
			/* These are hard-coded values */
			host_res->birq_registers = 0;
			host_res->birq_attrib = 0;
			host_res->dw_offset_for_monitor = 0;
			host_res->dw_chnl_offset = 0;
			/* CHNL_MAXCHANNELS */
			host_res->dw_num_chnls = CHNL_MAXCHANNELS;
			host_res->dw_chnl_buf_size = 0x400;
			dw_buff_size = sizeof(struct cfg_hostres);
		}
		*phost_resources = host_res;
	}
	/* End Mem alloc */
	return status;
}

void mem_ext_phys_pool_init(u32 pool_phys_base, u32 pool_size)
{
	u32 pool_virt_base;

	/* get the virtual address for the physical memory pool passed */
	pool_virt_base = (u32) ioremap(pool_phys_base, pool_size);

	if (!pool_virt_base) {
		pr_err("%s: external physical memory map failed\n", __func__);
		ext_phys_mem_pool_enabled = false;
	} else {
		ext_mem_pool.phys_mem_base = pool_phys_base;
		ext_mem_pool.phys_mem_size = pool_size;
		ext_mem_pool.virt_mem_base = pool_virt_base;
		ext_mem_pool.next_phys_alloc_ptr = pool_phys_base;
		ext_phys_mem_pool_enabled = true;
	}
}

void mem_ext_phys_pool_release(void)
{
	if (ext_phys_mem_pool_enabled) {
		iounmap((void *)(ext_mem_pool.virt_mem_base));
		ext_phys_mem_pool_enabled = false;
	}
}

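/*
 * The external pool allocator below is a simple bump allocator: it rounds
 * next_phys_alloc_ptr up to the requested alignment (the "& (align - 1)"
 * arithmetic assumes align is a power of two), hands out [ptr, ptr + bytes)
 * if it still fits inside the pool, and never reclaims individual blocks;
 * the whole pool is only released via mem_ext_phys_pool_release().
 * Worked example: with next_phys_alloc_ptr = 0x87001234 and align = 0x1000,
 * offset = 0x234, so the block is placed at 0x87002000.
 */
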
/*
 *  ======== mem_ext_phys_mem_alloc ========
 *  Purpose:
 *      Allocate physically contiguous, uncached memory from the external
 *      memory pool.
 */
static void *mem_ext_phys_mem_alloc(u32 bytes, u32 align, u32 *phys_addr)
{
	u32 new_alloc_ptr;
	u32 offset;
	u32 virt_addr;

	if (align == 0)
		align = 1;

	if (bytes > ((ext_mem_pool.phys_mem_base + ext_mem_pool.phys_mem_size)
		     - ext_mem_pool.next_phys_alloc_ptr)) {
		*phys_addr = 0;
		return NULL;
	} else {
		offset = (ext_mem_pool.next_phys_alloc_ptr & (align - 1));
		if (offset == 0)
			new_alloc_ptr = ext_mem_pool.next_phys_alloc_ptr;
		else
			new_alloc_ptr = (ext_mem_pool.next_phys_alloc_ptr) +
			    (align - offset);
		if ((new_alloc_ptr + bytes) <=
		    (ext_mem_pool.phys_mem_base + ext_mem_pool.phys_mem_size)) {
			/* we can allocate */
			*phys_addr = new_alloc_ptr;
			ext_mem_pool.next_phys_alloc_ptr =
			    new_alloc_ptr + bytes;
			virt_addr =
			    ext_mem_pool.virt_mem_base + (new_alloc_ptr -
							  ext_mem_pool.
							  phys_mem_base);
			return (void *)virt_addr;
		} else {
			*phys_addr = 0;
			return NULL;
		}
	}
}

/*
 *  ======== mem_alloc_phys_mem ========
 *  Purpose:
 *      Allocate physically contiguous, uncached memory.
 */
void *mem_alloc_phys_mem(u32 byte_size, u32 align_mask,
			 u32 *physical_address)
{
	void *va_mem = NULL;
	dma_addr_t pa_mem;

	if (byte_size > 0) {
		if (ext_phys_mem_pool_enabled) {
			va_mem = mem_ext_phys_mem_alloc(byte_size, align_mask,
							(u32 *) &pa_mem);
		} else
			va_mem = dma_alloc_coherent(NULL, byte_size, &pa_mem,
						    GFP_KERNEL);
		if (va_mem == NULL)
			*physical_address = 0;
		else
			*physical_address = pa_mem;
	}
	return va_mem;
}

/*
 *  ======== mem_free_phys_mem ========
 *  Purpose:
 *      Free the given block of physically contiguous memory.
 */
void mem_free_phys_mem(void *virtual_address, u32 physical_address,
		       u32 byte_size)
{
	DBC_REQUIRE(virtual_address != NULL);

	if (!ext_phys_mem_pool_enabled)
		dma_free_coherent(NULL, byte_size, virtual_address,
				  physical_address);
}