Explore the power of machine learning and Apple Intelligence within apps. Discuss integrating features, share best practices, and explore the possibilities for your app here.


Crash in modelWithContentsOfURL on iOS 16+
We have code that crashes. The crash stack is as follows:

Thread 26 Crashed:
0   CoreFoundation      0x0000000198b0569c CFRelease + 44
1   CoreFoundation      0x0000000198b12334 __CFBasicHashRehash + 1172
2   CoreFoundation      0x0000000198b015dc __CFBasicHashAddValue + 100
3   CoreFoundation      0x0000000198b232e4 CFDictionarySetValue + 208
4   Foundation          0x00000001979b0378 _getStringAtMarker + 464
5   Foundation          0x00000001979b016c _NSXPCSerializationStringForObject + 56
6   Foundation          0x00000001979cec4c __44-[NSXPCDecoder _decodeArrayOfObjectsForKey:]_block_invoke + 52
7   Foundation          0x00000001979ceb90 _NSXPCSerializationIterateArrayObject + 208
8   Foundation          0x00000001979cda7c -[NSXPCDecoder _decodeArrayOfObjectsForKey:] + 240
9   Foundation          0x00000001979cd1bc -[NSDictionary(NSDictionary) initWithCoder:] + 176
10  Foundation          0x00000001979ae6e8 _decodeObject + 1264
11  Foundation          0x00000001979cec4c __44-[NSXPCDecoder _decodeArrayOfObjectsForKey:]_block_invoke + 52
12  Foundation          0x00000001979ceb90 _NSXPCSerializationIterateArrayObject + 208
13  Foundation          0x00000001979cda7c -[NSXPCDecoder _decodeArrayOfObjectsForKey:] + 240
14  Foundation          0x00000001979cd1a4 -[NSDictionary(NSDictionary) initWithCoder:] + 152
15  Foundation          0x00000001979ae6e8 _decodeObject + 1264
16  Foundation          0x00000001979ad030 -[NSXPCDecoder _decodeObjectOfClasses:atObject:] + 148
17  Foundation          0x0000000197a0a7f0 _NSXPCSerializationDecodeTypedObjCValuesFromArray + 892
18  Foundation          0x0000000197a0a1f8 _NSXPCSerializationDecodeInvocationArgumentArray + 412
19  Foundation          0x0000000197a0866c -[NSXPCDecoder __decodeXPCObject:allowingSimpleMessageSend:outInvocation:outArguments:outArgumentsMaxCount:outMethodSignature:outSelector:isReply:replySelector:] + 700
20  Foundation          0x0000000197a61078 -[NSXPCDecoder _decodeReplyFromXPCObject:forSelector:] + 76
21  Foundation          0x0000000197a5f690 -[NSXPCConnection _decodeAndInvokeReplyBlockWithEvent:sequence:replyInfo:] + 252
22  Foundation          0x0000000197a63664 __88-[NSXPCConnection _sendInvocation:orArguments:count:methodSignature:selector:withProxy:]_block_invoke_5 + 188
23  Foundation          0x0000000197a08058 -[NSXPCConnection _sendInvocation:orArguments:count:methodSignature:selector:withProxy:] + 2244
24  CoreFoundation      0x0000000198b19d88 ___forwarding___ + 1016
25  CoreFoundation      0x0000000198b198d0 _CF_forwarding_prep_0 + 96
26  AppleNeuralEngine   0x00000001e912ab1c -[_ANEDaemonConnection loadModel:sandboxExtension:options:qos:withReply:] + 332
27  AppleNeuralEngine   0x00000001e912a674 __44-[_ANEClient doLoadModel:options:qos:error:]_block_invoke + 360
28  libdispatch.dylib   0x00000001a0a21dd4 _dispatch_client_callout + 20
29  libdispatch.dylib   0x00000001a0a312c4 _dispatch_lane_barrier_sync_invoke_and_complete + 56
30  AppleNeuralEngine   0x00000001e9129ef0 -[_ANEClient doLoadModel:options:qos:error:] + 500
31  Espresso            0x00000001a7e02034 Espresso::ANERuntimeEngine::compiler::build_segment(std::__1::shared_ptr<Espresso::abstract_batch> const&, int, Espresso::net_compiler_segment_based::segment_t const&) + 3736
32  Espresso            0x00000001a7e010cc Espresso::net_compiler_segment_based::build(std::__1::shared_ptr<Espresso::abstract_batch> const&, int, int) + 384
33  Espresso            0x00000001a7df02a4 Espresso::ANERuntimeEngine::compiler::build(std::__1::shared_ptr<Espresso::abstract_batch> const&, int, int) + 120
34  Espresso            0x00000001a7e1b3a4 Espresso::net::__build(std::__1::shared_ptr<Espresso::abstract_batch> const&, int, int) + 360
35  Espresso            0x00000001a7e178e0 Espresso::abstract_context::compute_batch_sync(void (std::__1::shared_ptr<Espresso::abstract_batch> const&) block_pointer) + 112
36  Espresso            0x00000001a7e198b8 EspressoLight::espresso_plan::prepare_compiler_if_needed() + 3208
37  Espresso            0x00000001a7e183f4 EspressoLight::espresso_plan::prepare() + 1712
38  Espresso            0x00000001a7da8e78 espresso_plan_build_with_options + 300
39  Espresso            0x00000001a7da8d30 espresso_plan_build + 44
40  CoreML              0x00000001b346645c -[MLNeuralNetworkEngine rebuildPlan:error:] + 536
41  CoreML              0x00000001b3464294 -[MLNeuralNetworkEngine _setupContextAndPlanWithConfiguration:usingCPU:reshapeWithContainer:error:] + 3132
42  CoreML              0x00000001b34797a0 -[MLNeuralNetworkEngine initWithContainer:configuration:error:] + 196
43  CoreML              0x00000001b347962c +[MLNeuralNetworkEngine loadModelFromCompiledArchive:modelVersionInfo:compilerVersionInfo:configuration:error:] + 164
44  CoreML              0x00000001b34792a0 +[MLLoader _loadModelWithClass:fromArchive:modelVersionInfo:compilerVersionInfo:configuration:error:] + 144
45  CoreML              0x00000001b3478c64 +[MLLoader _loadModelFromArchive:configuration:modelVersion:compilerVersion:loaderEvent:useUpdatableModelLoaders:loadingClasses:error:] + 532
46  CoreML              0x00000001b34650c8 +[MLLoader _loadWithModelLoaderFromArchive:configuration:loaderEvent:useUpdatableModelLoaders:error:] + 424
47  CoreML              0x00000001b3474bc8 +[MLLoader _loadModelFromArchive:configuration:loaderEvent:useUpdatableModelLoaders:error:] + 460
48  CoreML              0x00000001b347a024 +[MLLoader _loadModelFromAssetAtURL:configuration:loaderEvent:error:] + 244
49  CoreML              0x00000001b3479cbc +[MLLoader loadModelFromAssetAtURL:configuration:error:] + 104
50  CoreML              0x00000001b347ac2c -[MLModelAsset load:] + 564
51  CoreML              0x00000001b347a9c4 -[MLModelAsset modelWithError:] + 24
52  CoreML              0x00000001b347a7b4 +[MLModel modelWithContentsOfURL:configuration:error:] + 172
53  CoreML              0x00000001b37afbc4 +[MLModel modelWithContentsOfURL:error:] + 76

Core code:

MLModel* model = nil;
NSError *error = nil;
@try {
    model = [MLModel modelWithContentsOfURL:modelURL error:&error];
} @catch (NSException *exception) {
    model = nil;
    return Ret_OperationErr_InvalidInit;
}

Two questions: What does this stack mean? And since I added @try/@catch, why is it still crashing?
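For readers hitting the same trace, a couple of hedged notes and a sketch, not a confirmed fix: frames 20-30 show the fault occurring while CoreML's ANE client decodes the XPC reply from the Neural Engine compiler daemon, and a bad CFRelease is a memory-access fault rather than an NSException, which is why an Objective-C @try/@catch around modelWithContentsOfURL cannot intercept it. Purely as an assumption to test, one could steer the load away from the Neural Engine via MLModelConfiguration while the crash is investigated; the Swift below assumes a local modelURL.

import CoreML

// Sketch (assumption, not a confirmed fix): restrict compute units so the
// ANE compilation path that appears in the crashing stack is never taken.
func loadModelAvoidingANE(at modelURL: URL) -> MLModel? {
    let config = MLModelConfiguration()
    config.computeUnits = .cpuAndGPU   // keep the model off the Neural Engine while debugging

    do {
        // Recoverable load failures surface here as a thrown error; a crash inside
        // the ANE daemon reply is a signal-level fault and never reaches this catch.
        return try MLModel(contentsOf: modelURL, configuration: config)
    } catch {
        print("Model load failed: \(error)")
        return nil
    }
}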
Replies: 1 · Boosts: 0 · Views: 250 · Activity: 3w
Use iPad M1 processor as GPU
Hello, I’m currently working on TinyML (ML on the edge) using the Google Colab platform. Having exhausted the free usage of my compute units, I’m being prompted to pay. I’ve been considering leveraging the GPU capabilities of my iPad M1 and my Intel-based Mac. Both devices have Thunderbolt ports capable of connections up to 30GB/s. Since I’m primarily using a classification model, extensive GPU usage isn’t necessary. I’m looking for assistance or guidance on using the iPad’s processor as an eGPU for my Mac, possibly through an API or Apple technology. Any help would be greatly appreciated!
Replies: 2 · Boosts: 0 · Views: 535 · Activity: Jul ’24
Writing Tools API
With iOS 18, Writing Tools are enabled for text fields all over the system. Under the hood, this uses Apple's on-device LLM to summarize a piece of text. Is there any kind of Swift API to access this LLM summarization feature for text that I provide to the API, instead of forcing the user to select the text?
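Not an answer to the arbitrary-string case, but for completeness, a hedged sketch of the only Writing Tools hook I'm aware of in the iOS 18 SDK: a per-text-view opt-in rather than a summarize-this-String API, so the user still drives it from text in the view.

import UIKit

// Sketch: opt a UITextView into the full Writing Tools experience on iOS 18.
// There is no call here that takes an arbitrary String and returns a summary;
// the system UI still operates on the text the user works with in the view.
func enableWritingTools(on textView: UITextView) {
    if #available(iOS 18.0, *) {
        textView.writingToolsBehavior = .complete   // .limited and .none are the other options
    }
}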
Replies: 3 · Boosts: 7 · Views: 615 · Activity: Aug ’24
Issue with Using Pre-Allocated CVPixelBuffer for CoreML Model Prediction
Hello everyone, I have a PyTorch model that outputs an image. I converted this model to CoreML using coremltools, and the resulting CoreML model can be used in my iOS project to perform inference using the MLModel's prediction function, which returns a result of type CVPixelBuffer. I want to avoid allocating memory every time I call the prediction function. Instead, I would like to use a pre-allocated buffer. I noticed that MLModel provides an overloaded prediction function that accepts an MLPredictionOptions object. This object has an outputBackings member, which allows me to pass a pre-allocated CVPixelBuffer. However, when I attempt to do this, I encounter the following error:

Copy from tensor to pixel buffer (pixel_format_type: BGRA, image_pixel_type: BGR8, component_dtype: INT, component_pack: FMT_32) is not supported.

Could someone point out what I might be doing wrong? How can I make MLModel use my pre-allocated CVPixelBuffer instead of creating a new one each time?

Here is the Python code I used to convert the PyTorch model to CoreML, where I specified the color_layout as coremltools.colorlayout.BGR:

def export_ml(model, resolution="640x360"):
    ml_path = f"model.mlpackage"
    print("exporting ml model")
    width, height = map(int, resolution.split('x'))
    img0 = torch.randn(1, 3, height, width)
    img1 = torch.randn(1, 3, height, width)
    traced_model = torch.jit.trace(model, (img0, img1))
    input_shape = ct.Shape(shape=(1, 3, height, width))
    output_type_img = ct.ImageType(name="out", scale=1.0, bias=[0, 0, 0], color_layout=ct.colorlayout.BGR)
    # Note: input_type_img0 / input_type_img1 (the ct.ImageType definitions for the two inputs) are defined elsewhere and not shown here
    ml_model = ct.convert(
        traced_model,
        inputs=[input_type_img0, input_type_img1],
        outputs=[output_type_img]
    )
    ml_model.save(ml_path)

Here is the Swift code in my iOS project that calls the MLModel's prediction function:

func prediction(image1: CVPixelBuffer, image2: CVPixelBuffer, model: MLModel) -> CVPixelBuffer? {
    let options = MLPredictionOptions()
    guard let outputBuffer = outputBacking else {
        fatalError("Failed to create CVPixelBuffer.")
    }
    options.outputBackings = ["out": outputBuffer]

    // Perform the prediction
    guard let prediction = try? model.prediction(from: RifeInput(img0: image1, img1: image2), options: options) else {
        Log.i("Failed to perform prediction")
        return nil
    }

    // Extract the result
    guard let cvPixelBuffer = prediction.featureValue(for: "out")?.imageBufferValue else {
        Log.i("Failed to get results from the model")
        return nil
    }
    return cvPixelBuffer
}

Here is the code I used to create the outputBacking:

let attributes: [String: Any] = [
    kCVPixelBufferCGImageCompatibilityKey as String: true,
    kCVPixelBufferCGBitmapContextCompatibilityKey as String: true,
    kCVPixelBufferWidthKey as String: Int(640),
    kCVPixelBufferHeightKey as String: Int(360),
    kCVPixelBufferIOSurfacePropertiesKey as String: [:]
]
let status = CVPixelBufferCreate(kCFAllocatorDefault, 640, 360, kCVPixelFormatType_32BGRA, attributes as CFDictionary, &outputBacking)
guard let outputBuffer = outputBacking else {
    fatalError("Failed to create CVPixelBuffer.")
}

Any help or guidance would be greatly appreciated! Thank you!
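Not an answer, but a hedged debugging sketch that may help narrow this down: the error message suggests the backing buffer's pixel format doesn't match what the compiled model wants to write, so it can be useful to allocate the buffer from the output's own image constraint in the model description rather than hard-coding 32BGRA. The feature name "out" is taken from the post; everything else here is an assumption.

import CoreML
import CoreVideo

// Sketch: allocate the output backing from the model's own image constraint,
// so the pixel format and size are whatever the compiled model expects.
func makeOutputBacking(for model: MLModel, featureName: String = "out") -> CVPixelBuffer? {
    guard let constraint = model.modelDescription
        .outputDescriptionsByName[featureName]?
        .imageConstraint else { return nil }

    let attributes: [String: Any] = [
        kCVPixelBufferIOSurfacePropertiesKey as String: [:]   // IOSurface backing for zero-copy use
    ]
    var buffer: CVPixelBuffer?
    CVPixelBufferCreate(kCFAllocatorDefault,
                        constraint.pixelsWide,
                        constraint.pixelsHigh,
                        constraint.pixelFormatType,
                        attributes as CFDictionary,
                        &buffer)
    return buffer
}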
Replies: 1 · Boosts: 0 · Views: 254 · Activity: 3w
TensorFlow Metal not installable on M2 MacBook
I've been attempting to install tensorflow-metal on my computer so that I can use GPUs instead of CPUs. I have tensorflow-macos installed already, and I am fully up to date with pip and TensorFlow. I'm currently 2 months into building and training a TF CNN, and I'm at the point where training a single epoch for my network will take a week (I have a lot of data that I need to use). I desperately need to use GPUs but am stuck with CPUs for now. I can't get access to a cluster, so the best I can do is continue to use my M2 MacBook. Is there any other way I can install tensorflow-metal? Is there a way I can use GPUs (rather than CPUs) with TF if I can't install tensorflow-metal? I keep getting this error message:

ERROR: Could not find a version that satisfies the requirement tensorflow-metal (from versions: none)
ERROR: No matching distribution found for tensorflow-metal

I looked on Apple forums, tried to download it from GitHub (the page is down), and tried anything else I could think of and/or find on the internet, but it still isn't installing. I've used the following commands and still no luck:

python -m pip install tensorflow-metal
pip install https://github.com/apple/tensorflow_metal/releases/download/v0.5.0/tensorflow_metal-0.5.0-py3-none-any.whl
pip install tensorflow-metal
pip3 install tensorflow-metal
SYSTEM_VERSION_COMPAT=0 python -m pip install tensorflow-metal
SYSTEM_VERSION_COMPAT=0 pip install tensorflow-macos tensorflow-metal
conda install -c anaconda tensorflow-gpu

Any help would be appreciated! Thanks so much!
Replies: 2 · Boosts: 1 · Views: 462 · Activity: Aug ’24
Error in TensorFlow on MacBook Air M1 (macOS Monterey)
I keep getting this error even after reinstalling:

Traceback (most recent call last):
  File "", line 1, in
  File "/Users/aman/LLM/env/lib/python3.8/site-packages/tensorflow/__init__.py", line 439, in
    _ll.load_library(_plugin_dir)
  File "/Users/aman/LLM/env/lib/python3.8/site-packages/tensorflow/python/framework/load_library.py", line 151, in load_library
    py_tf.TF_LoadLibrary(lib)
tensorflow.python.framework.errors_impl.NotFoundError: dlopen(/Users/aman/LLM/env/lib/python3.8/site-packages/tensorflow-plugins/libmetal_plugin.dylib, 0x0006): Symbol not found: _OBJC_CLASS_$_MPSGraphRandomOpDescriptor
  Referenced from: /Users/aman/LLM/env/lib/python3.8/site-packages/tensorflow-plugins/libmetal_plugin.dylib
  Expected in: /System/Library/Frameworks/MetalPerformanceShadersGraph.framework/Versions/A/MetalPerformanceShadersGraph
Replies: 1 · Boosts: 0 · Views: 409 · Activity: Aug ’24
Using AssistantEntity with existing AppEntities for iOS17
Hi, I have an existing app with AppEntities defined that works on iOS 16 and iOS 17. The AppEntities also have an EntityPropertyQuery defined, so they work as 'find intents'. I want to use the new @AssistantEntity on iOS 18 while supporting the previous versions. What's the best way to do this? For example, I have a 'person' AppEntity:

@available(iOS 16.0, macOS 13.0, watchOS 9.0, tvOS 16.0, *)
struct CJLogAppEntity: AppEntity {
    static var defaultQuery = CJLogAppEntityQuery()
    ....
}

struct CJLogAppEntityQuery: EntityPropertyQuery {
    ...
}

How do I adopt this with @AssistantEntity(schema: .journal.entry) for iOS 18, while maintaining compatibility with iOS 16 and 17?
Replies: 0 · Boosts: 0 · Views: 267 · Activity: 3w
Video Background Removal
I am searching for a method to remove the background from a video. The video can come from a camera session's fileOutput URL or from the photo library. I was able to get a live preview of the removed background using depth data and some Metal code from the sample Enhancing Live Video by Leveraging TrueDepth Camera Data. However, I couldn't figure out a way to save this as a video so that I can upload it. Also, this method uses over 150% CPU (per Xcode's CPU gauge), which seems like a lot; the device heats up quickly and drops frames when it's hot. I also found a similar CoreML-based example on GitHub by Dmitry Voitekh which uses less than 40% CPU. Any information regarding this will be helpful. Objective: remove the background from a video and save it.
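A hedged sketch of one possible direction, not a full solution: Vision's person-segmentation request can produce a per-frame matte without the TrueDepth pipeline, and that matte can then be composited over a new background and handed to an AVAssetWriter for saving. The compositing and writing steps are omitted here, and the function name is illustrative.

import Vision
import CoreVideo

// Sketch: generate a person-segmentation matte for one video frame.
// The returned pixel buffer is a single-channel mask that can be used
// to composite the frame over a new background before writing it out.
func personMask(for frame: CVPixelBuffer) throws -> CVPixelBuffer? {
    let request = VNGeneratePersonSegmentationRequest()
    request.qualityLevel = .balanced                          // .fast trades quality for CPU/GPU time
    request.outputPixelFormat = kCVPixelFormatType_OneComponent8

    let handler = VNImageRequestHandler(cvPixelBuffer: frame, options: [:])
    try handler.perform([request])
    return request.results?.first?.pixelBuffer
}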
Replies: 5 · Boosts: 0 · Views: 427 · Activity: 3w
H1xANELoadBalancer is taking longer to load
We have an application that receives a message (through MQTT) from an external system to snap a photo, runs a CoreML Vision request on the image, and then sends the results back. The customer has hundreds of devices, and recently on a couple of those devices (iPhone 13 Pros) the customer encountered an issue in which the devices were not responding in time. There was no crash; individual inferences were just slowed down. The device performs thousands of requests per day. Comparing the device logs before and after the request, I noticed that Apple logs the following:

default 2024-09-04 13:18:31.310401 -0400 ProcessName Processing image for reference: ***
default 2024-09-04 13:18:31.403606 -0400 ProcessName Found matching service: H1xANELoadBalancer
default 2024-09-04 13:18:31.403646 -0400 ProcessName Found matching service: H11ANEIn
default 2024-09-04 13:18:31.403661 -0400 ProcessName Found ANE device :1
default 2024-09-04 13:18:31.403681 -0400 ProcessName Total num of devices 1
default 2024-09-04 13:18:31.403681 -0400 ProcessName (Single-ANE System) Opening H11ANE device at index 0
default 2024-09-04 13:18:31.403681 -0400 ProcessName H11ANEDevice::H11ANEDeviceOpen, usage type: 1

In a good scenario (above), these actions are performed very quickly (in a split second). The app doesn't do anything until the CoreML inference result is returned. In the bad scenario (below), there is a delay of about 4 seconds between the app handing control to the Vision request and getting the response back (leading to timeouts with the customer):

default 2024-09-04 13:19:08.777468 -0400 ProcessName Processing image for reference: ZZZ
default 2024-09-04 13:19:12.199758 -0400 ProcessName Found matching service: H1xANELoadBalancer
default 2024-09-04 13:19:12.199800 -0400 ProcessName Found matching service: H11ANEIn
default 2024-09-04 13:19:12.199812 -0400 ProcessName Found ANE device :1
default 2024-09-04 13:19:12.199832 -0400 ProcessName Total num of devices 1
default 2024-09-04 13:19:12.199834 -0400 ProcessName (Single-ANE System) Opening H11ANE device at index 0
default 2024-09-04 13:19:12.199834 -0400 ProcessName H11ANEDevice::H11ANEDeviceOpen, usage type: 1

The logs are in order; I haven't removed anything. The code is fairly simple, it's just running a Vision request without doing much. Has anyone encountered this before?
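One hedged mitigation sketch, based only on the symptom that the delay sits between "Processing image" and the ANE service discovery lines: keep a single compiled model and Vision request alive for the app's lifetime and run a throwaway warm-up inference at startup, so later requests don't pay a (re)load or recompilation cost. The class and names below are illustrative, and this is not a confirmed root cause.

import Vision
import CoreML
import CoreVideo

// Sketch: load the Core ML model once and reuse the same request object,
// instead of building a new VNCoreMLModel per inference.
final class Detector {
    private let request: VNCoreMLRequest

    init(modelURL: URL) throws {
        let config = MLModelConfiguration()
        let mlModel = try MLModel(contentsOf: modelURL, configuration: config)
        let visionModel = try VNCoreMLModel(for: mlModel)
        request = VNCoreMLRequest(model: visionModel)
    }

    func detect(in pixelBuffer: CVPixelBuffer) throws -> [VNObservation] {
        let handler = VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:])
        try handler.perform([request])   // reusing the warm request avoids repeated setup
        return request.results ?? []
    }
}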
Replies: 0 · Boosts: 1 · Views: 217 · Activity: 3w
How to Ensure Quantized Models Run on ANE on iPhone 15 (iOS 18 Beta 8)
When I use CoreML to infer a w8a8 model on iPhone 15 (iOS 18 beta 8), the model uses CPU inference instead of ANE, which results in slower inference speed. The model I am using is from the coremltools documentation, which indicates that on iOS 17, quantized models can run on ANE properly and achieve faster speeds. How can I make the quantized model run correctly on ANE to achieve the desired inference speed? To reproduce this issue, you can download the Weight & Activation quantized model from the following link: https://apple.github.io/coremltools/docs-guides/source/opt-quantization-perf.html.
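Not a fix, but a hedged first check while this is investigated: make sure the load isn't restricted by configuration, by explicitly requesting the Neural Engine when loading, and then compare against an Xcode Core ML performance report to see where the ops are actually dispatched. The URL below is a placeholder.

import CoreML

// Sketch: explicitly request CPU + Neural Engine so any CPU fallback
// is the runtime's decision rather than a configuration default.
func loadQuantizedModel(at url: URL) throws -> MLModel {
    let config = MLModelConfiguration()
    config.computeUnits = .cpuAndNeuralEngine   // or .all to also allow the GPU
    return try MLModel(contentsOf: url, configuration: config)
}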
Replies: 0 · Boosts: 0 · Views: 255 · Activity: 3w
UI for on-device LLMs / Foundation models
I was watching the WWDC 2024 session Deploy machine learning and AI models on-device with Core ML (https://developer.apple.com/videos/play/wwdc2024/10161/), and the speaker showed a UI in which he was running on-device LLMs / Foundation models. I was wondering whether this UI is open source, so I can download it and play around with an app similar to what was shown.
Replies: 1 · Boosts: 1 · Views: 375 · Activity: Aug ’24
Apple Intelligence download stuck on 99%
I just installed iOS 18.1 Beta 3 on my iPad M4 (I was previously on the 18.0 betas). I did the same thing on my iPhone 15 Pro Max, which works perfectly. However, on the iPad it seems to be stuck at 99% and won't finish downloading. The status message near the top keeps switching between "downloading" and "will continue later on WiFi". Note that I'm connected to my home WiFi, which is very fast, and the iPhone was on the same network and downloaded quickly without issue. Is there a way to reset and start again, since it's stuck? This is really frustrating. It has been going on for several hours at this point.
Replies: 26 · Boosts: 7 · Views: 5.6k · Activity: Aug ’24
Siri Intent Dismiss callback issue
I am opening the Siri shortcut screen from the viewDidLoad method, as follows:

override func viewDidLoad() {
    super.viewDidLoad()
    // Present the Siri Shortcut screen to add the Card Payment Intent
    let viewController = INUIAddVoiceShortcutViewController(shortcut: INShortcut(intent: self.cardPaymentIntent)!)
    viewController.modalPresentationStyle = .pageSheet
    // Setting Delegate
    viewController.delegate = self
    self.present(viewController, animated: true, completion: nil)
}

// Delegate Method Conformance :: INUIAddVoiceShortcutViewControllerDelegate

@available(iOS 12.0, *)
func addVoiceShortcutViewController(_ controller: INUIAddVoiceShortcutViewController, didFinishWith voiceShortcut: INVoiceShortcut?, error: Error?) {
    controller.dismiss(animated: true, completion: nil)
    // The issue is here. Whether we add the shortcut or dismiss the Siri shortcut screen without adding it, this delegate gets called.
}

@available(iOS 12.0, *)
func addVoiceShortcutViewControllerDidCancel(_ controller: INUIAddVoiceShortcutViewController) {
    controller.dismiss(animated: true, completion: nil)
}

// Card Payment Intent
public var cardPaymentIntent: CardPaymentIntent {
    let intent = CardPaymentIntent()
    intent.suggestedInvocationPhrase = NSLocalizedString("Pay my credit card", comment: "")
    return intent
}

Whenever I present the Siri shortcut screen, whether I add the shortcut or dismiss the screen without adding it, the shortcut ends up added in both cases, and this method is called every time:

func addVoiceShortcutViewController(_ controller: INUIAddVoiceShortcutViewController, didFinishWith voiceShortcut: INVoiceShortcut?, error: Error?)

Any solution? When I dismiss the screen, I want the shortcut not to be added.
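For what it's worth, a hedged sketch of how the two delegate callbacks are usually distinguished, based only on the signatures shown above: didFinishWith hands back a non-nil INVoiceShortcut when something was actually saved, while the cancel callback covers a plain dismissal, so branching on those parameters separates the cases (it won't by itself explain a shortcut being saved after a cancel).

import IntentsUI

// Sketch: branch on the delegate parameters instead of treating every
// didFinishWith call as a successful add.
final class ShortcutSheetDelegate: NSObject, INUIAddVoiceShortcutViewControllerDelegate {

    func addVoiceShortcutViewController(_ controller: INUIAddVoiceShortcutViewController,
                                        didFinishWith voiceShortcut: INVoiceShortcut?,
                                        error: Error?) {
        if let error = error {
            print("Adding shortcut failed: \(error)")
        } else if let shortcut = voiceShortcut {
            print("Shortcut saved: \(shortcut.invocationPhrase)")
        } else {
            print("Sheet closed without saving a shortcut")
        }
        controller.dismiss(animated: true, completion: nil)
    }

    func addVoiceShortcutViewControllerDidCancel(_ controller: INUIAddVoiceShortcutViewController) {
        // User tapped Cancel; nothing was saved.
        controller.dismiss(animated: true, completion: nil)
    }
}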
Replies: 1 · Boosts: 0 · Views: 354 · Activity: Jul ’24
Vision framework not working on Apple Vision Pro
I'm getting this error:

com.apple.Vision Code=9 "Could not build inference plan - ANECF error: failed to load ANE model file:///System/Library/Frameworks/Vision.framework/anodv4_drop6_fp16.H14G.espresso.hwx"

This code raises the error:

func imageToHeadBox(image: CVPixelBuffer) async throws -> [CGRect] {
    let request: DetectFaceRectanglesRequest = DetectFaceRectanglesRequest()
    let faceResult: [FaceObservation] = try await request.perform(on: image)
    let faceBoxs: [CGRect] = faceResult.map { face in
        let faceBoundingBox: CGRect = face.boundingBox.cgRect
        return faceBoundingBox
    }
    return faceBoxs
}
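In case it helps narrow things down, a hedged sketch of the same face-rectangle query through the older VNDetectFaceRectanglesRequest path, purely as a comparison point to check whether the failure is specific to the newer request type on this device; the function name is illustrative.

import Vision
import CoreVideo
import CoreGraphics

// Sketch: classic Vision face-rectangle detection on a pixel buffer,
// for comparison with the newer DetectFaceRectanglesRequest API.
func headBoxes(in image: CVPixelBuffer) throws -> [CGRect] {
    let request = VNDetectFaceRectanglesRequest()
    let handler = VNImageRequestHandler(cvPixelBuffer: image, options: [:])
    try handler.perform([request])
    // boundingBox is in normalized image coordinates (origin at bottom-left).
    return (request.results ?? []).map { $0.boundingBox }
}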
Replies: 1 · Boosts: 0 · Views: 364 · Activity: Aug ’24